From: tzachid Date: Wed, 19 Oct 2005 16:45:05 +0000 (+0000) Subject: Added low level driver X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=54d9710e775b132a17f47c21ea7132cf70c1e8f2;p=~shefty%2Frdma-win.git Added low level driver git-svn-id: svn://openib.tc.cornell.edu/gen1@126 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- diff --git a/branches/MTHCA/hw/mthca/dirs b/branches/MTHCA/hw/mthca/dirs new file mode 100644 index 00000000..aa698135 --- /dev/null +++ b/branches/MTHCA/hw/mthca/dirs @@ -0,0 +1,3 @@ +DIRS=\ + kernel \ + user diff --git a/branches/MTHCA/hw/mthca/kernel/Makefile b/branches/MTHCA/hw/mthca/kernel/Makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/MTHCA/hw/mthca/kernel/SOURCES b/branches/MTHCA/hw/mthca/kernel/SOURCES new file mode 100644 index 00000000..78157a2b --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/SOURCES @@ -0,0 +1,63 @@ +TRUNK=..\..\.. + +TARGETNAME=mthca +TARGETPATH=$(TRUNK)\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER + +SOURCES= \ + hca.rc \ + hca_driver.c \ + hca_data.c \ + hca_pci.c \ + hca_pnp.c \ + hca_verbs.c \ + hca_mcast.c \ + hca_direct.c \ + hca_memory.c \ + hca_smp.c \ + \ + mt_l2w.c \ + mt_memory.c \ + mt_cache.c \ + mt_packer.c \ + mt_ud_header.c \ + mt_device.c \ + mt_verbs.c \ + \ + mthca_allocator.c \ + mthca_av.c \ + mthca_cmd.c \ + mthca_cq.c \ + mthca_eq.c \ + mthca_main.c \ + mthca_memfree.c \ + mthca_mr.c \ + mthca_mcg.c \ + mthca_mad.c \ + mthca_pd.c \ + mthca_profile.c \ + mthca_provider.c \ + mthca_qp.c \ + mthca_srq.c \ + mthca_uar.c \ + +!if 0 + + +!endif + + +INCLUDES=\ + $(TRUNK)\inc\kernel\mthca; \ + $(TRUNK)\inc; \ + $(TRUNK)\inc\kernel; \ + $(TRUNK)\inc\complib; \ + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib \ + $(DDK_LIB_PATH)\wdmguid.lib + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/MTHCA/hw/mthca/kernel/hca.rc b/branches/MTHCA/hw/mthca/kernel/hca.rc new file mode 100644 index 00000000..d9460cc3 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca.rc @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniServ Tavor HCA Driver (checked)" +#else +#define VER_FILEDESCRIPTION_STR "InfiniServ Tavor HCA Driver" +#endif +#define VER_INTERNALNAME_STR "hca.sys" +#define VER_ORIGINALFILENAME_STR "hca.sys" +#include diff --git a/branches/MTHCA/hw/mthca/kernel/hca_data.c b/branches/MTHCA/hw/mthca/kernel/hca_data.c new file mode 100644 index 00000000..758c09e5 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_data.c @@ -0,0 +1,2293 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $ + */ + + +#include "hca_data.h" +#include "hca_debug.h" +#include "mthca_provider.h" + +static cl_spinlock_t hob_lock; + +#if 1 +u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR | MLNX_DBG_TRACE; +#else +u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR | + MLNX_DBG_QPN | + MLNX_DBG_MEM | + MLNX_DBG_INFO | + MLNX_DBG_TRACE | + // MLNX_DBG_DIRECT | + 0; +#endif + +u_int32_t g_mlnx_dpc2thread = 0; + + +cl_qlist_t mlnx_hca_list; + +mlnx_hob_t mlnx_hob_array[MLNX_NUM_HOBKL]; // kernel HOB - one per HCA (cmdif access) +mlnx_hobul_t *mlnx_hobul_array[MLNX_NUM_HOBUL]; // kernel HOBUL - one per HCA (kar access) + +#ifdef WIN_TO_BE_CHANGED + +static void +mlnx_async_dpc( + IN cl_async_proc_item_t *async_item_p ); + +#if MLNX_COMP_MODEL +static void +mlnx_comp_dpc( + IN PRKDPC p_dpc, + IN void *context, + IN void *pfn_comp_cb, + IN void *unused ); +#else +static void +mlnx_comp_dpc( + IN cl_async_proc_item_t *async_item_p ); +#endif + +// ### Callback Interface +static void +mlnx_comp_cb( + IN HH_hca_hndl_t hh_hndl, + IN HH_cq_hndl_t hh_cq, + IN void *private_data); + +static void +mlnx_async_cb( + IN HH_hca_hndl_t hh_hndl, + IN HH_event_record_t *hh_er_p, + IN void *private_data); + +#endif + + +///////////////////////////////////////////////////////// +// ### HCA +///////////////////////////////////////////////////////// +void +mlnx_hca_insert( + IN mlnx_hca_t *p_hca ) +{ + cl_spinlock_acquire( &hob_lock ); + cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item ); + cl_spinlock_release( &hob_lock ); +} + +void +mlnx_hca_remove( + IN mlnx_hca_t *p_hca ) +{ + cl_spinlock_acquire( &hob_lock ); + cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item ); + cl_spinlock_release( &hob_lock ); +} + +mlnx_hca_t* +mlnx_hca_from_guid( + IN ib_net64_t guid ) +{ + cl_list_item_t *p_item; + mlnx_hca_t *p_hca = NULL; + + cl_spinlock_acquire( &hob_lock ); + p_item = cl_qlist_head( &mlnx_hca_list ); + while( p_item != cl_qlist_end( &mlnx_hca_list ) ) + { + p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item ); + if( p_hca->guid == guid ) + break; + p_item = cl_qlist_next( p_item ); + p_hca = NULL; + } + cl_spinlock_release( &hob_lock ); + return p_hca; +} + +/* +void +mlnx_names_from_guid( + IN ib_net64_t guid, + OUT char **hca_name_p, + OUT char **dev_name_p) +{ + unsigned int idx; + + if (!hca_name_p) return; + if (!dev_name_p) return; + + for (idx = 0; idx < mlnx_num_hca; idx++) + { + if (mlnx_hca_array[idx].ifx.guid == guid) + { + *hca_name_p = mlnx_hca_array[idx].hca_name_p; + *dev_name_p = mlnx_hca_array[idx].dev_name_p; + } + } +} +*/ + +///////////////////////////////////////////////////////// +// ### HCA +///////////////////////////////////////////////////////// +cl_status_t +mlnx_hcas_init( void ) +{ + u_int32_t idx; + + cl_qlist_init( &mlnx_hca_list ); + return cl_spinlock_init( &hob_lock ); +} + +#ifdef WIN_TO_BE_REMOVED +///////////////////////////////////////////////////////// +// ### HOB +///////////////////////////////////////////////////////// +cl_status_t +mlnx_hobs_init( void ) +{ + u_int32_t idx; + + cl_qlist_init( &mlnx_hca_list ); + + for (idx = 0; idx < MLNX_NUM_HOBKL; idx++) + { +#ifdef WIN_TO_BE_CHANGED + mlnx_hob_array[idx].hh_hndl = NULL; +#endif + mlnx_hob_array[idx].comp_cb_p = NULL; + mlnx_hob_array[idx].async_cb_p = NULL; + mlnx_hob_array[idx].ca_context = NULL; + mlnx_hob_array[idx].async_proc_mgr_p = NULL; + mlnx_hob_array[idx].cl_device_h = NULL; + // mlnx_hob_array[idx].port_lmc_p = NULL; + mlnx_hob_array[idx].index = idx; + 
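+		// Slots start out invalid here; mlnx_hobs_insert() below marks a
+		// slot E_MARK_CA once an HCA is bound to it, so lookups can tell
+		// free entries from live ones.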
mlnx_hob_array[idx].mark = E_MARK_INVALID; + } + return cl_spinlock_init( &hob_lock ); +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobs_insert( + IN mlnx_hca_t *p_hca, + OUT mlnx_hob_t **hob_pp) +{ + u_int32_t idx; + ib_api_status_t status = IB_ERROR; + mlnx_cache_t *p_cache; + + p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 ); + if( !p_cache ) + return IB_INSUFFICIENT_MEMORY; + + cl_spinlock_acquire(&hob_lock); + for (idx = 0; idx < MLNX_NUM_HOBKL; idx++) + { + if (!mlnx_hob_array[idx].hh_hndl) + { +#ifdef WIN_TO_BE_CHANGED + mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl; +#endif + mlnx_hob_array[idx].mark = E_MARK_CA; + if (hob_pp) *hob_pp = &mlnx_hob_array[idx]; + status = IB_SUCCESS; + break; + } + } + cl_spinlock_release(&hob_lock); + + if (IB_SUCCESS == status) + (*hob_pp)->cache = p_cache; + else + cl_free( p_cache ); + + return status; +} + +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobs_set_cb( + IN mlnx_hob_t *hob_p, + IN ci_completion_cb_t comp_cb_p, + IN ci_async_event_cb_t async_cb_p, + IN const void* const ib_context) +{ + cl_status_t cl_status; + + // Setup the callbacks + if (!hob_p->async_proc_mgr_p) + { + hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) ); + if( !hob_p->async_proc_mgr_p ) + { + return IB_INSUFFICIENT_MEMORY; + } + cl_async_proc_construct( hob_p->async_proc_mgr_p ); + cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" ); + if( cl_status != CL_SUCCESS ) + { + cl_async_proc_destroy( hob_p->async_proc_mgr_p ); + cl_free(hob_p->async_proc_mgr_p); + hob_p->async_proc_mgr_p = NULL; + return IB_INSUFFICIENT_RESOURCES; + } + } + +#ifdef WIN_TO_BE_REMOVED +// need to be removed, because GEN2 doesn't support this API +// callbacks are reported on create_qp + if (hob_p->hh_hndl) + { + THH_hob_set_async_eventh(hob_p->hh_hndl, + mlnx_async_cb, + &hob_p->index); // This is the context our CB wants to receive + THH_hob_set_comp_eventh( hob_p->hh_hndl, + mlnx_comp_cb, + &hob_p->index); // This is the context our CB wants to receive + hob_p->comp_cb_p = comp_cb_p; + hob_p->async_cb_p = async_cb_p; + hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context)); + return IB_SUCCESS; + } + return IB_ERROR; + +#else + + hob_p->comp_cb_p = comp_cb_p; + hob_p->async_cb_p = async_cb_p; + hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context)); + return IB_SUCCESS; + +#endif +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_hobs_remove( + IN mlnx_hob_t *hob_p) +{ + cl_async_proc_t *p_async_proc; + mlnx_cache_t *p_cache; + + cl_spinlock_acquire( &hob_lock ); + + hob_p->mark = E_MARK_INVALID; + + p_async_proc = hob_p->async_proc_mgr_p; + hob_p->async_proc_mgr_p = NULL; + + p_cache = hob_p->cache; + hob_p->cache = NULL; + + hob_p->comp_cb_p = NULL; + hob_p->async_cb_p = NULL; + hob_p->ca_context = NULL; + hob_p->cl_device_h = NULL; + + cl_spinlock_release( &hob_lock ); + + if( p_async_proc ) + { + cl_async_proc_destroy( p_async_proc ); + cl_free( p_async_proc ); + } + + 
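+	// p_cache was detached from the HOB while hob_lock was held, so it
+	// can be freed here without racing a concurrent lookup or remove.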
if( p_cache ) + cl_free( p_cache ); + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d \n", hob_p - mlnx_hob_array)); +} + +#ifdef WIN_TO_BE_CHANGED + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobs_get_context( + IN mlnx_hob_t *hob_p, + OUT void **context_p) +{ + // Verify handle + CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL); + + if (hob_p->hh_hndl) + { + if (context_p) *context_p = &hob_p->index; + return IB_SUCCESS; + } + return IB_ERROR; +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobs_lookup( + IN HH_hca_hndl_t hndl, + OUT mlnx_hob_t **hca_p) +{ + u_int32_t idx; + + if (!hca_p) + return IB_ERROR; + + cl_spinlock_acquire( &hob_lock ); + for (idx = 0; idx < MLNX_NUM_HOBKL; idx++) + { + if (hndl == mlnx_hob_array[idx].hh_hndl) + { + *hca_p = &mlnx_hob_array[idx]; + cl_spinlock_release( &hob_lock ); + return IB_SUCCESS; + } + } + cl_spinlock_release( &hob_lock ); + return IB_ERROR; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_hobs_get_handle( + IN mlnx_hob_t *hob_p, + OUT HH_hca_hndl_t *hndl_p) +{ + // Verify handle + CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL); + + if (hndl_p) + *hndl_p = hob_p->hh_hndl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +mlnx_hobul_t * +mlnx_hobs_get_hobul( + IN mlnx_hob_t *hob_p) +{ + // Verify handle + if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL) + return NULL; + + return mlnx_hobul_array[hob_p->index]; +} + + +static int priv_ceil_log2(u_int32_t n) +{ + int shift; + + for (shift = 31; shift >0; shift--) + if (n & (1 << shift)) break; + + if (((unsigned)1 << shift) < n) shift++; + + return shift; +} + +///////////////////////////////////////////////////////// +// ### HOBUL +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobul_new( + IN mlnx_hob_t *hob_p, + IN HH_hca_hndl_t hh_hndl, + IN void *resources_p) +{ + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + ib_api_status_t status; + VAPI_hca_cap_t hca_caps; + u_int32_t i; +#if MLNX_COMP_MODEL == 1 + static uint32_t proc_num = 0; +#endif + + // Verify handle + CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL); + + if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t)))) + return IB_INSUFFICIENT_MEMORY; + + // The following will NULL all pointers/sizes (used in cleanup) +// cl_memclr(hobul_p, sizeof (mlnx_hobul_t)); + + hobul_p->hh_hndl = hh_hndl; + + if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl)) + { + status = IB_INSUFFICIENT_RESOURCES; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hh_hndl; + + if (hca_ul_info) + { + hobul_p->vendor_id = hca_ul_info->vendor_id; + hobul_p->device_id = hca_ul_info->dev_id; + hobul_p->hca_ul_resources_p = resources_p; + hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz; + hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz; + hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz; + } + + if (HH_OK != THH_hob_query(hh_hndl, &hca_caps)) + { + status = IB_ERROR; + goto cleanup; + } + + hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq)); + hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF + 
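+	// Assuming MASK32(s) == (1 << s) - 1, MASK32(priv_ceil_log2(n)) rounds
+	// n up to the next power of two minus one, e.g. max_num_qp = 0x10000
+	// gives qp_idx_mask = 0xFFFF; the table sizes derived below are then
+	// powers of two and an index lookup reduces to an AND with the mask.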
hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1; + hobul_p->max_cq = hobul_p->cq_idx_mask + 1; + hobul_p->max_qp = hobul_p->qp_idx_mask + 1; + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0%x qp 0%x pd 0%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num)); + + /* create and initialize the data stucture for CQs */ + hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t)); + + /* create and initialize the data stucture for QPs */ + hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t)); + + /* create and initialize the data stucture for PDs */ + hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t)); + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed? cq=%d qp=%d pd=%d\n", + !hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl)); + + if (!hobul_p->pd_info_tbl || + !hobul_p->qp_info_tbl || + !hobul_p->cq_info_tbl) + { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + /* Initialize all mutexes. */ + for( i = 0; i < hobul_p->max_cq; i++ ) + { + cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex ); +#if MLNX_COMP_MODEL + KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc, + mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] ); +#if MLNX_COMP_MODEL == 1 + KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc, + (CCHAR)(proc_num++ % cl_proc_count()) ); +#endif /* MLNX_COMP_MODEL == 1 */ +#endif /* MLNX_COMP_MODEL */ + } + + for( i = 0; i < hobul_p->max_qp; i++ ) + cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex ); + + for( i = 0; i < hobul_p->max_pd; i++ ) + cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex ); + + for( i = 0; i < hobul_p->max_cq; i++ ) + { + if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS ) + { + status = IB_ERROR; + goto cleanup; + } + } + + for( i = 0; i < hobul_p->max_qp; i++ ) + { + if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS ) + { + status = IB_ERROR; + goto cleanup; + } + } + + for( i = 0; i < hobul_p->max_pd; i++ ) + { + if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS ) + { + status = IB_ERROR; + goto cleanup; + } + } + + hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size; + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size)); + + cl_spinlock_acquire(&hob_lock); + mlnx_hobul_array[hob_p->index] = hobul_p; + cl_spinlock_release(&hob_lock); + + return IB_SUCCESS; + +cleanup: + if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl ); + if (hobul_p->pd_info_tbl) + { + for( i = 0; i < hobul_p->max_pd; i++ ) + cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex ); + cl_free(hobul_p->pd_info_tbl); + } + if (hobul_p->qp_info_tbl) + { + for( i = 0; i < hobul_p->max_qp; i++ ) + cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex ); + cl_free(hobul_p->qp_info_tbl); + } + if (hobul_p->cq_info_tbl) + { + for( i = 0; i < hobul_p->max_cq; i++ ) + cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex ); + cl_free(hobul_p->cq_info_tbl); + } + if (hobul_p) cl_free( hobul_p); + return status; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_hobul_get( + IN mlnx_hob_t *hob_p, + OUT void **resources_p ) +{ + mlnx_hobul_t *hobul_p; + + // Verify handle + CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL); + + hobul_p = mlnx_hobul_array[hob_p->index]; + + if (hobul_p && resources_p) + { + *resources_p = hobul_p->hca_ul_resources_p; + } +} + 
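+// A minimal sketch of the HOB/HOBUL lifecycle built from the routines in
+// this file (illustrative only -- hh_hndl and resources_p come from THH at
+// device-start time, and mlnx_hobs_insert() lives under WIN_TO_BE_REMOVED):
+//
+//	mlnx_hob_t	*hob_p;
+//	void		*ul_res_p;
+//
+//	mlnx_hobs_insert( p_hca, &hob_p );		// claim a kernel HOB slot
+//	mlnx_hobul_new( hob_p, hh_hndl, resources_p );	// per-HCA tables + DPCs
+//	mlnx_hobul_get( hob_p, &ul_res_p );		// fetch UL resources later
+//	...
+//	mlnx_hobul_delete( hob_p );			// defined below
+//	mlnx_hobs_remove( hob_p );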
+///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_hobul_delete( + IN mlnx_hob_t *hob_p) +{ + mlnx_hobul_t *hobul_p; + u_int32_t i; + + // Verify handle + CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL); + + cl_spinlock_acquire(&hob_lock); + hobul_p = mlnx_hobul_array[hob_p->index]; + mlnx_hobul_array[hob_p->index] = NULL; + cl_spinlock_release(&hob_lock); + + if (!hobul_p) return; + + if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl ); + if (hobul_p->pd_info_tbl) + { + for( i = 0; i < hobul_p->max_pd; i++ ) + cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex ); + cl_free(hobul_p->pd_info_tbl); + } + if (hobul_p->qp_info_tbl) + { + for( i = 0; i < hobul_p->max_qp; i++ ) + cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex ); + cl_free(hobul_p->qp_info_tbl); + } + if (hobul_p->cq_info_tbl) + { + for( i = 0; i < hobul_p->max_cq; i++ ) + { + KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc ); + cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex ); + } + cl_free(hobul_p->cq_info_tbl); + } + if (hobul_p) cl_free( hobul_p); +} + +///////////////////////////////////////////////////////// +// ### Callbacks +///////////////////////////////////////////////////////// + +ib_async_event_t +mlnx_map_vapi_event_type( + IN unsigned event_id, + OUT ENUM_EVENT_CLASS *event_class_p) +{ + switch (event_id) + { + case VAPI_QP_PATH_MIGRATED: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_QP_APM; + + case VAPI_QP_COMM_ESTABLISHED: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_QP_COMM; + + case VAPI_SEND_QUEUE_DRAINED: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_SQ_DRAINED; + + case VAPI_CQ_ERROR: + if (event_class_p) *event_class_p = E_EV_CQ; + return IB_AE_CQ_ERROR; + + case VAPI_LOCAL_WQ_INV_REQUEST_ERROR: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_WQ_REQ_ERROR; + + case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_WQ_ACCESS_ERROR; + + case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_QP_FATAL; + + case VAPI_PATH_MIG_REQ_ERROR: + if (event_class_p) *event_class_p = E_EV_QP; + return IB_AE_QP_APM_ERROR; + + case VAPI_LOCAL_CATASTROPHIC_ERROR: + if (event_class_p) *event_class_p = E_EV_CA; + return IB_AE_LOCAL_FATAL; + + case VAPI_PORT_ERROR: + /* + * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c: + * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events: + * - TAVOR_IF_SUB_EV_PORT_DOWN + * - TAVOR_IF_SUB_EV_PORT_UP + * + * These map to (respectively) + * - VAPI_PORT_ERROR + * - VAPI_PORT_ACTIVE + */ + if (event_class_p) *event_class_p = E_EV_CA; + return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */ + + case VAPI_PORT_ACTIVE: + if (event_class_p) *event_class_p = E_EV_CA; + return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */ + + default: + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n", + event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL)); + if (event_class_p) *event_class_p = E_EV_CA; + return IB_AE_LOCAL_FATAL; + } +} + +void +mlnx_conv_vapi_event( + IN HH_event_record_t *hh_event_p, + IN ib_event_rec_t *ib_event_p, + OUT ENUM_EVENT_CLASS *event_class_p) +{ + + // ib_event_p->context is handled by the caller + // + ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p); + + // no traps currently generated + // ib_event_p->trap_info.lid = ; + // ib_event_p->trap_info.port_guid = ; + // 
ib_event_p->trap_info.port_num = hh_er; +} + +void +mlnx_async_cb( + IN HH_hca_hndl_t hh_hndl, + IN HH_event_record_t *hh_er_p, + IN void *private_data) +{ + u_int32_t obj_idx; + mlnx_hob_t *hob_p; + + mlnx_cb_data_t cb_data; + mlnx_cb_data_t *cb_data_p; + + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n", + private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5)); + + if (!private_data || !hh_er_p) return; + + obj_idx = *(u_int32_t *)private_data; + if (obj_idx >= MLNX_NUM_HOBKL) return; + + hob_p = mlnx_hob_array + obj_idx; + + // g_mlnx_dpc2thread will be initialized as a module paramter (default - disabled(0)) + if (g_mlnx_dpc2thread) + { + cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t)); + if (!cb_data_p) return; + + cb_data_p->hh_hndl = hh_hndl; + cb_data_p->private_data = private_data; + cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t)); + cb_data_p->async_item.pfn_callback = mlnx_async_dpc; + cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item ); + } else + { + cb_data_p = &cb_data; + + cb_data_p->hh_hndl = hh_hndl; + cb_data_p->private_data = private_data; + cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t)); + mlnx_async_dpc( &cb_data_p->async_item ); + } +} + +static void +mlnx_async_dpc( + IN cl_async_proc_item_t *async_item_p ) +{ + HH_event_record_t *hh_er_p; + u_int32_t obj_idx; + mlnx_hob_t *hob_p; + mlnx_hobul_t *hobul_p; + mlnx_cb_data_t *cb_data_p; + + ENUM_EVENT_CLASS event_class; + ib_event_rec_t event_r; + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p)); + + cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item ); + + if (!cb_data_p) return; + + hh_er_p = &cb_data_p->hh_er; + obj_idx = *(u_int32_t *)cb_data_p->private_data; + hob_p = mlnx_hob_array + obj_idx; + hobul_p = mlnx_hobul_array[obj_idx]; + + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n", + hh_er_p->etype, hob_p->ca_context)); + + if (!hob_p || + !hobul_p || + !hob_p->hh_hndl || + !hob_p->async_cb_p) + { + goto cleanup; + } + + cl_memclr(&event_r, sizeof(event_r)); + mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class); + + switch(event_class) + { + case E_EV_CA: + event_r.context = (void *)hob_p->ca_context; + break; + + case E_EV_QP: + { + obj_idx = hh_er_p->event_modifier.qpn; + if (obj_idx < hobul_p->max_qp) + event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context; + else + { + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp)); + goto cleanup; + } + } + break; + + case E_EV_CQ: + { + obj_idx = hh_er_p->event_modifier.cq; + if (obj_idx < hobul_p->max_cq) + event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context; + else + { + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq)); + goto cleanup; + } + } + break; + + case E_EV_LAST: + default: + // CL_ASSERT(0); // This shouldn't happen + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class)); + break; + } + + // Call the registered CB + (*hob_p->async_cb_p)(&event_r); + // Fall Through +cleanup: + if (g_mlnx_dpc2thread) + { + cl_free(cb_data_p); + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_comp_cb( + IN HH_hca_hndl_t hh_hndl, + IN HH_cq_hndl_t hh_cq, + IN void *private_data) +{ +#if MLNX_COMP_MODEL + u_int32_t cq_num; + u_int32_t hca_idx; + 
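+	// Completion dispatch depends on MLNX_COMP_MODEL: model 1 pins each
+	// CQ's DPC to a processor when mlnx_hobul_new() builds the CQ table,
+	// model 2 re-targets the DPC round-robin on every completion, and 0
+	// uses the g_mlnx_dpc2thread-controlled path in the #else branch.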
mlnx_hob_t *hob_p; + mlnx_hobul_t *hobul_p; +#if MLNX_COMP_MODEL == 2 + static uint32_t proc_num = 0; +#endif + + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data)); + + UNUSED_PARAM( hh_hndl ); + + hca_idx = *(u_int32_t *)private_data; + hob_p = mlnx_hob_array + hca_idx; + hobul_p = mlnx_hobul_array[hca_idx]; + cq_num = hh_cq & hobul_p->cq_idx_mask; + + if (NULL != hob_p && NULL != hobul_p && + hob_p->hh_hndl && hob_p->comp_cb_p) + { + if (cq_num < hobul_p->max_cq) + { +#if MLNX_COMP_MODEL == 2 + KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc, + (CCHAR)(proc_num++ % cl_proc_count()) ); +#endif /* MLNX_COMP_MODEL == 2 */ + KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc, + hob_p, NULL ); + } + else + { + HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") ); + } + } +#else /* MLNX_COMP_MODEL */ + u_int32_t obj_idx; + mlnx_hob_t *hob_p; + + mlnx_cb_data_t cb_data; + mlnx_cb_data_t *cb_data_p; + + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data)); + + if (!private_data) return; + + obj_idx = *(u_int32_t *)private_data; + hob_p = mlnx_hob_array + obj_idx; + if (!hob_p) return; + + if (g_mlnx_dpc2thread) + { + cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t)); + if (!cb_data_p) return; + + cb_data_p->hh_hndl = hh_hndl; + cb_data_p->hh_cq = hh_cq; + cb_data_p->private_data = private_data; + + cb_data_p->async_item.pfn_callback = mlnx_comp_dpc; + + // Report completion through async_proc + cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item ); + + } else + { + cb_data_p = &cb_data; + + cb_data_p->hh_hndl = hh_hndl; + cb_data_p->hh_cq = hh_cq; + cb_data_p->private_data = private_data; + + // Report completion directly from DPC (verbs should NOT sleep) + mlnx_comp_dpc( &cb_data_p->async_item ); + } +#endif /* MLNX_COMP_MODEL */ +} + +#if MLNX_COMP_MODEL +static void +mlnx_comp_dpc( + IN PRKDPC p_dpc, + IN void *context, + IN void *arg1, + IN void *unused ) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t*)arg1; + UNUSED_PARAM( p_dpc ); + UNUSED_PARAM( unused ); + + hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context ); +} +#else /* MLNX_COMP_MODEL */ +static void +mlnx_comp_dpc( + IN cl_async_proc_item_t *async_item_p ) +{ + u_int32_t cq_num; + u_int32_t hca_idx; + mlnx_hob_t *hob_p; + mlnx_hobul_t *hobul_p; + mlnx_cb_data_t *cb_data_p; + + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p)); + + cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item ); + if (!cb_data_p) return; + + hca_idx = *(u_int32_t *)cb_data_p->private_data; + hob_p = mlnx_hob_array + hca_idx; + hobul_p = mlnx_hobul_array[hca_idx]; + cq_num = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask; + + if (NULL != hob_p && NULL != hobul_p && + hob_p->hh_hndl && hob_p->comp_cb_p) + { + if (cq_num < hobul_p->max_cq) + { + (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context); + } + } + + if (g_mlnx_dpc2thread) + { + cl_free(cb_data_p); + } +} +#endif /* MLNX_COMP_MODEL */ + +// ### Conversions + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +VAPI_mrw_acl_t +map_ibal_acl( + IN ib_access_t ibal_acl) +{ + VAPI_mrw_acl_t vapi_acl = 0; + + if (ibal_acl & IB_AC_RDMA_READ) vapi_acl |= VAPI_EN_REMOTE_READ; + if (ibal_acl & IB_AC_RDMA_WRITE) vapi_acl |= VAPI_EN_REMOTE_WRITE; + if (ibal_acl & IB_AC_ATOMIC) vapi_acl |= VAPI_EN_REMOTE_ATOM; + if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE; + if 
(ibal_acl & IB_AC_MW_BIND) vapi_acl |= VAPI_EN_MEMREG_BIND; + + return vapi_acl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_access_t +map_vapi_acl( + IN VAPI_mrw_acl_t vapi_acl) +{ + ib_access_t ibal_acl = 0; + + if (vapi_acl & VAPI_EN_REMOTE_READ) ibal_acl |= IB_AC_RDMA_READ; + if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE; + if (vapi_acl & VAPI_EN_REMOTE_ATOM) ibal_acl |= IB_AC_ATOMIC; + if (vapi_acl & VAPI_EN_LOCAL_WRITE) ibal_acl |= IB_AC_LOCAL_WRITE; + if (vapi_acl & VAPI_EN_MEMREG_BIND) ibal_acl |= IB_AC_MW_BIND; + + return ibal_acl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static VAPI_rdma_atom_acl_t +map_ibal_qp_acl( + IN ib_access_t ibal_acl) +{ + VAPI_rdma_atom_acl_t vapi_qp_acl = 0; + + if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE; + if (ibal_acl & IB_AC_RDMA_READ) vapi_qp_acl |= VAPI_EN_REM_READ; + if (ibal_acl & IB_AC_ATOMIC) vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP; + + return vapi_qp_acl; + +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static ib_access_t +map_vapi_qp_acl( + IN VAPI_rdma_atom_acl_t vapi_qp_acl) +{ + ib_access_t ibal_acl = IB_AC_LOCAL_WRITE; + + if (vapi_qp_acl & VAPI_EN_REM_WRITE) ibal_acl |= IB_AC_RDMA_WRITE; + if (vapi_qp_acl & VAPI_EN_REM_READ) ibal_acl |= IB_AC_RDMA_READ; + if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC; + + return ibal_acl; +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_lock_region( + IN mlnx_mro_t *mro_p, + IN boolean_t um_call ) +{ + MOSAL_iobuf_t old_iobuf; + + // Find context + if( um_call ) + mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx(); + else + mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx(); + + // Save pointer to existing locked region. + old_iobuf = mro_p->mr_iobuf; + + // Lock Region + if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start, + (MT_size_t)mro_p->mr_size, + mro_p->mr_prot_ctx, + mro_p->mr_mosal_perm, + &mro_p->mr_iobuf, + 0 )) + { + return IB_ERROR; + } + + if( old_iobuf ) + { + if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) ) + return IB_ERROR; + } + + return IB_SUCCESS; +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_ibal_mr_create( + IN u_int32_t pd_idx, + IN OUT mlnx_mro_t *mro_p, + IN VAPI_mr_change_t change_flags, + IN ib_mr_create_t const *p_mr_create, + IN boolean_t um_call, + OUT HH_mr_t *mr_props_p ) +{ + ib_api_status_t status; + + /* Set ACL information first since it is used to lock the region. */ + if( change_flags & VAPI_MR_CHANGE_ACL ) + { + mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl ); + // This computation should be externalized by THH + mro_p->mr_mosal_perm = + MOSAL_PERM_READ | + ((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0); + } + + if( change_flags & VAPI_MR_CHANGE_TRANS ) + { + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length)); + // Build TPT entries + mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr; + mro_p->mr_size = p_mr_create->length; + if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call))) + { + return status; + } + } + + /* Now fill in the MR properties. 
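+	 * start/size/acl were fixed up above; the TPT is handed over as the
+	 * MOSAL iobuf pinned by mlnx_lock_region(), so THH can build its
+	 * translation entries directly from the locked pages.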
*/ + mr_props_p->start = mro_p->mr_start; + mr_props_p->size = mro_p->mr_size; + mr_props_p->acl = mro_p->mr_acl; + mr_props_p->pd = pd_idx; + + // Setup MTT info + mr_props_p->tpt.tpt_type = HH_TPT_IOBUF; + mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf; + + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +// On entry mro_p->mr_start holds the pmr address +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_ibal_pmr_create( + IN u_int32_t pd_idx, + IN mlnx_mro_t *mro_p, + IN ib_phys_create_t const *p_pmr_create, + OUT HH_mr_t *mr_props_p ) +{ + VAPI_phy_addr_t* buf_lst = NULL; + VAPI_size_t* sz_lst = NULL; + u_int32_t i; + u_int32_t page_shift = priv_ceil_log2(p_pmr_create->hca_page_size); + u_int64_t page_mask = (1 << page_shift) - 1; + u_int64_t tot_sz = 0; + + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, + ("PRE: addr %p size 0x%"PRIx64" shift %d\n", + (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_mask)); + mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, + ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start)); + + mr_props_p->start = mro_p->mr_start; + mr_props_p->size = p_pmr_create->length; + mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl); + mr_props_p->pd = pd_idx; + +#ifdef _DEBUG_ + mro_p->mr_size = mr_props_p->size; +// mro_p->mr_first_page_addr = 0; +// mro_p->mr_num_pages = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT); +// CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n", +// (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n", + p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges)); +#endif + + // Build TPT entries + if (!p_pmr_create->range_array) + { + return IB_INVALID_PARAMETER; + } + + if (p_pmr_create->hca_page_size != + MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift)) + { + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n")); + return IB_INVALID_PARAMETER; + } + + for (i = 0; i < p_pmr_create->num_ranges; i++) + { + uint64_t start_addr = p_pmr_create->range_array[i].base_addr; + uint64_t end_addr = start_addr + p_pmr_create->range_array[i].size; + + if( end_addr < start_addr ) { + CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") ); + return IB_INVALID_PARAMETER; + } + + if (start_addr != + MT_DOWN_ALIGNX_PHYS(start_addr, page_shift)) + { + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start adrs is not page aligned\n")); + return IB_INVALID_PARAMETER; + } + + tot_sz += p_pmr_create->range_array[i].size; + } + + if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("length(0x"PRIx64") + buf offset(0x"PRIx64") larger than sum " + "of phys ranges(0x"PRIx64")\n", + p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) ); + return IB_INVALID_PARAMETER; + } + + if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("buf offset(0x%x) > than 1st phy range size(0x"PRIx64")\n", + p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) ); + return IB_INVALID_PARAMETER; + } + + /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. 
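+	 * (cl_pzalloc draws from paged pool, which is only safe because
+	 * registration never runs at DISPATCH_LEVEL.)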
*/ + buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges)); + if (!buf_lst) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Failed to allocate range address list.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + + /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */ + sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges)); + if (!sz_lst) + { + cl_free( buf_lst ); + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Failed to allocate range size list.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + for (i = 0; i < p_pmr_create->num_ranges; i++) + { + buf_lst[i] = p_pmr_create->range_array[i].base_addr; + sz_lst[i] = p_pmr_create->range_array[i].size; + } + + mr_props_p->tpt.tpt_type = HH_TPT_BUF; + mr_props_p->tpt.num_entries = p_pmr_create->num_ranges; + mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst; + mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst; + mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset; + + return IB_SUCCESS; +} + + +u_int8_t +mlnx_gid_to_index( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int8_t *raw_gid) +{ + ib_gid_t *gid_table_p = NULL; + u_int8_t index = 0; // default return value + u_int8_t i; + + gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t)); + + mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p); + + for (i = 0; i < 64; i++) + { + if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t))) + { + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i)); + index = i; + break; + } + } + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index)); + + cl_free( gid_table_p); + return index; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_ibal_av( + IN HH_hca_hndl_t hh_hndl, + IN const ib_av_attr_t *ibal_av_p, + OUT VAPI_ud_av_t *vapi_av_p) +{ + vapi_av_p->port = ibal_av_p->port_num; + vapi_av_p->sl = ibal_av_p->sl; + vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid); + + vapi_av_p->static_rate = + (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS? 
0 : 3); + ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL, + &vapi_av_p->traffic_class, &vapi_av_p->flow_label ); + vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH: + //vapi_av_p->src_path_bits = 0; + + /* For global destination or Multicast address:*/ + if (ibal_av_p->grh_valid) + { + vapi_av_p->grh_flag = TRUE; + vapi_av_p->hop_limit = ibal_av_p->grh.hop_limit; + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw)); + vapi_av_p->sgid_index = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw); + cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid)); + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_vapi_av( + IN HH_hca_hndl_t hh_hndl, + IN const VAPI_ud_av_t *vapi_av_p, + OUT ib_av_attr_t *ibal_av_p) +{ + uint8_t ver; + + ibal_av_p->port_num = vapi_av_p->port; + ibal_av_p->sl = vapi_av_p->sl; + ibal_av_p->dlid = cl_ntoh16(vapi_av_p->dlid); + + /* For global destination or Multicast address:*/ + ibal_av_p->grh_valid = vapi_av_p->grh_flag; + + ver = 2; + ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver, + vapi_av_p->traffic_class, + vapi_av_p->flow_label); + ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit; + + THH_hob_get_sgid(hh_hndl, + vapi_av_p->port, + vapi_av_p->sgid_index, + &ibal_av_p->grh.src_gid.raw); + + cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid)); + + ibal_av_p->static_rate = (vapi_av_p->static_rate? + IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS); + ibal_av_p->path_bits = vapi_av_p->src_path_bits; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_cqe_status( + IN VAPI_wc_status_t vapi_status) +{ + switch (vapi_status) + { + case IB_COMP_SUCCESS: return IB_WCS_SUCCESS; + case IB_COMP_LOC_LEN_ERR: return IB_WCS_LOCAL_LEN_ERR; + case IB_COMP_LOC_QP_OP_ERR: return IB_WCS_LOCAL_OP_ERR; + case IB_COMP_LOC_PROT_ERR: return IB_WCS_LOCAL_PROTECTION_ERR; + case IB_COMP_WR_FLUSH_ERR: return IB_WCS_WR_FLUSHED_ERR; + case IB_COMP_MW_BIND_ERR: return IB_WCS_MEM_WINDOW_BIND_ERR; + case IB_COMP_REM_INV_REQ_ERR: return IB_WCS_REM_INVALID_REQ_ERR; + case IB_COMP_REM_ACCESS_ERR: return IB_WCS_REM_ACCESS_ERR; + case IB_COMP_REM_OP_ERR: return IB_WCS_REM_OP_ERR; + case IB_COMP_RETRY_EXC_ERR: return IB_WCS_TIMEOUT_RETRY_ERR; + case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR; + case IB_COMP_REM_ABORT_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? + case IB_COMP_FATAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? + case IB_COMP_GENERAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? 
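+		// IBAL has no distinct codes for REM_ABORT/FATAL/GENERAL -- hence
+		// the "???" above; IB_WCS_REM_ACCESS_ERR serves as the catch-all.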
+ default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n", + vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR)); + return IB_WCS_REM_ACCESS_ERR; + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_cqe_type( + IN VAPI_cqe_opcode_t opcode) +{ + switch (opcode) + { + case VAPI_CQE_SQ_SEND_DATA: return IB_WC_SEND; + case VAPI_CQE_SQ_RDMA_WRITE: return IB_WC_RDMA_WRITE; + case VAPI_CQE_SQ_RDMA_READ: return IB_WC_RDMA_READ; + case VAPI_CQE_SQ_COMP_SWAP: return IB_WC_COMPARE_SWAP; + case VAPI_CQE_SQ_FETCH_ADD: return IB_WC_FETCH_ADD; + case VAPI_CQE_SQ_BIND_MRW: return IB_WC_MW_BIND; + case VAPI_CQE_RQ_SEND_DATA: return IB_WC_RECV; + case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE; + default: + return IB_WC_SEND; + } +} + +///////////////////////////////////////////////////////// +// Map Remote Node Addr Type +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_rna_type( + IN VAPI_remote_node_addr_type_t rna) +{ + switch (rna) + { + case VAPI_RNA_UD: return IB_QPT_UNRELIABLE_DGRM; + case VAPI_RNA_RAW_ETY: return IB_QPT_RAW_ETHER; + case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6; + default: + return IB_QPT_RELIABLE_CONN; + } +} + +////////////////////////////////////////////////////////////// +// Convert from VAPI memory-region attributes to IBAL +////////////////////////////////////////////////////////////// +void +mlnx_conv_vapi_mr_attr( + IN ib_pd_handle_t pd_h, + IN HH_mr_info_t *mr_info_p, + OUT ib_mr_attr_t *mr_query_p) +{ + mr_query_p->h_pd = pd_h; + mr_query_p->local_lb = mr_info_p->local_start; + mr_query_p->local_ub = mr_info_p->local_start + mr_info_p->local_size; + mr_query_p->remote_lb = mr_info_p->remote_start; + mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size; + + mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl); + mr_query_p->lkey = mr_info_p->lkey; + mr_query_p->rkey = mr_info_p->rkey; +} + +////////////////////////////////////////////////////////////// +// Convert from IBAL memory-window bind request to VAPI +////////////////////////////////////////////////////////////// +void +mlnx_conv_bind_req( + IN HHUL_qp_hndl_t hhul_qp_hndl, + IN ib_bind_wr_t* const p_mw_bind, + OUT HHUL_mw_bind_t *bind_prop_p) +{ + bind_prop_p->qp = hhul_qp_hndl; + bind_prop_p->id = p_mw_bind->wr_id; + bind_prop_p->acl = map_ibal_acl(p_mw_bind->access_ctrl); + bind_prop_p->size = p_mw_bind->local_ds.length; + bind_prop_p->start = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr; + bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey; + bind_prop_p->comp_type = + (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? 
VAPI_SIGNALED : VAPI_UNSIGNALED; +} + + +///////////////////////////////////////////////////////// +// Map IBAL qp type to VAPI transport and special qp_type +///////////////////////////////////////////////////////// +int +mlnx_map_ibal_qp_type( + IN ib_qp_type_t ibal_qpt, + OUT VAPI_special_qp_t *vapi_qp_type_p) +{ + switch (ibal_qpt) + { + case IB_QPT_RELIABLE_CONN: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_RC; + + case IB_QPT_UNRELIABLE_CONN: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_UC; + + case IB_QPT_UNRELIABLE_DGRM: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_UD; + + case IB_QPT_QP0: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP; + return IB_TS_UD; + + case IB_QPT_QP1: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + case IB_QPT_RAW_IPV6: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ?? + return IB_TS_RAW; + + case IB_QPT_RAW_ETHER: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; // TBD: ?? + return IB_TS_RAW; + + case IB_QPT_MAD: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + case IB_QPT_QP0_ALIAS: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP; + return IB_TS_UD; + + case IB_QPT_QP1_ALIAS: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n", + ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW)); + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; + return IB_TS_RAW; + } +} + +///////////////////////////////////////////////////////// +// QP and CQ value must be handled by caller +///////////////////////////////////////////////////////// +void +mlnx_conv_qp_create_attr( + IN const ib_qp_create_t *create_attr_p, + OUT HHUL_qp_init_attr_t *init_attr_p, + OUT VAPI_special_qp_t *vapi_qp_type_p) +{ + init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p); + + init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth; + init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth; + init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge; + init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge; + + init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? 
VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR; + init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR; + + init_attr_p->srq = HHUL_INVAL_SRQ_HNDL; +} + +///////////////////////////////////////////////////////// +// NOTE: ibal_qp_state is non linear - so we cannot use a LUT +///////////////////////////////////////////////////////// +VAPI_qp_state_t +mlnx_map_ibal_qp_state( + IN ib_qp_state_t ibal_qp_state) +{ + VAPI_qp_state_t vapi_qp_state = VAPI_RESET; + + if (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET; + else if (ibal_qp_state & IB_QPS_INIT) vapi_qp_state = VAPI_INIT; + else if (ibal_qp_state & IB_QPS_RTR) vapi_qp_state = VAPI_RTR; + else if (ibal_qp_state & IB_QPS_RTS) vapi_qp_state = VAPI_RTS; + else if (ibal_qp_state & IB_QPS_SQD) vapi_qp_state = VAPI_SQD; + else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE; + else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR; + + return vapi_qp_state; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_qp_state_t +mlnx_map_vapi_qp_state( + IN VAPI_qp_state_t vapi_qp_state) +{ + switch (vapi_qp_state) + { + case VAPI_RESET: return IB_QPS_RESET; + case VAPI_INIT: return IB_QPS_INIT; + case VAPI_RTR: return IB_QPS_RTR; + case VAPI_RTS: return IB_QPS_RTS; + case VAPI_SQD: return IB_QPS_SQD; + case VAPI_SQE: return IB_QPS_SQERR; + case VAPI_ERR: return IB_QPS_ERROR; + // TBD: IB_QPS_SQD_DRAINING + // TBD: IB_QPS_SQD_DRAINED + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n", + vapi_qp_state, VAPI_ERR, IB_QPS_INIT)); + return IB_QPS_INIT; + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_apm_state_t +mlnx_map_vapi_apm_state( + IN VAPI_mig_state_t vapi_apm_state) +{ + switch (vapi_apm_state) + { + case VAPI_MIGRATED: return IB_APM_MIGRATED; + case VAPI_REARM: return IB_APM_REARM; + case VAPI_ARMED: return IB_APM_ARMED; + + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n", + vapi_apm_state, VAPI_ARMED, 0)); + return 0; + } +} + +#if 0 +///////////////////////////////////////////////////////// +// UNUSED: IBAL uses same encoding as THH +///////////////////////////////////////////////////////// +static +u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu) +{ + u_int32_t mtu = 0; + + // MTU256=1, MTU512=2, MTU1024=3 + while (ibal_mtu >>= 1) mtu++; + return mtu - 7; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static +u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu) +{ + return (1 << (vapi_mtu + 7)); +} +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_vapi_qp_attr( + IN HH_hca_hndl_t hh_hndl, + IN VAPI_qp_attr_t *hh_qp_attr_p, + OUT ib_qp_attr_t *qp_attr_p) +{ + qp_attr_p->access_ctrl = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags); + qp_attr_p->pkey_index = (uint16_t)hh_qp_attr_p->pkey_ix; + qp_attr_p->sq_depth = hh_qp_attr_p->cap.max_oust_wr_sq; + qp_attr_p->rq_depth = hh_qp_attr_p->cap.max_oust_wr_rq; + qp_attr_p->sq_sge = hh_qp_attr_p->cap.max_sg_size_sq; + qp_attr_p->rq_sge = hh_qp_attr_p->cap.max_sg_size_rq; + qp_attr_p->sq_max_inline = hh_qp_attr_p->cap.max_inline_data_sq; + qp_attr_p->init_depth = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing + qp_attr_p->resp_res = 
hh_qp_attr_p->qp_ous_rd_atom; // outstanding as target (in) + + qp_attr_p->num = cl_ntoh32(hh_qp_attr_p->qp_num); + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n", + qp_attr_p->num, + hh_qp_attr_p->qp_num)); + + qp_attr_p->dest_num = cl_ntoh32(hh_qp_attr_p->dest_qp_num); + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n", + qp_attr_p->dest_num, + hh_qp_attr_p->dest_qp_num)); + qp_attr_p->qkey = cl_ntoh32 (hh_qp_attr_p->qkey); + + qp_attr_p->sq_psn = cl_ntoh32 (hh_qp_attr_p->sq_psn); + qp_attr_p->rq_psn = cl_ntoh32 (hh_qp_attr_p->rq_psn); + + qp_attr_p->primary_port = hh_qp_attr_p->port; + qp_attr_p->alternate_port = hh_qp_attr_p->alt_port; + + qp_attr_p->state = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state); + qp_attr_p->apm_state = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state); + + mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av); + qp_attr_p->primary_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu; + qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; + qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count; + qp_attr_p->primary_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry; + + mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av); + qp_attr_p->alternate_av.conn. path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu; + qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; + qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count; + qp_attr_p->alternate_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry; +} +#if 0 +XXX: +QP_ATTR_QP_STATE +QP_ATTR_EN_SQD_ASYN_NOTIF +QP_ATTR_QP_NUM ++ QP_ATTR_REMOTE_ATOMIC_FLAGS ++ QP_ATTR_PKEY_IX ++ QP_ATTR_PORT ++ QP_ATTR_QKEY ++ QP_ATTR_RQ_PSN ++ QP_ATTR_AV + +QP_ATTR_PATH_MTU ++ QP_ATTR_TIMEOUT ++ QP_ATTR_RETRY_COUNT ++ QP_ATTR_RNR_RETRY +QP_ATTR_QP_OUS_RD_ATOM + +- QP_ATTR_ALT_PATH + ++ QP_ATTR_MIN_RNR_TIMER +QP_ATTR_SQ_PSN +QP_ATTR_OUS_DST_RD_ATOM +QP_ATTR_PATH_MIG_STATE +QP_ATTR_CAP +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN HH_hca_hndl_t hh_hndl, + IN ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT VAPI_qp_attr_t *qp_attr_p, + OUT VAPI_qp_attr_mask_t *attr_mask_p) +{ + + qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state); + *attr_mask_p = QP_ATTR_QP_STATE; + + switch(modify_attr_p->req_state) + { + case IB_QPS_RESET: + break; + + case IB_QPS_INIT: + *attr_mask_p |= QP_ATTR_PORT | + QP_ATTR_QKEY | + QP_ATTR_PKEY_IX ; + + qp_attr_p->port = modify_attr_p->state.init.primary_port; + qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey); + qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index; + if (IB_QPT_RELIABLE_CONN == qp_type) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl); + } else + { + qp_attr_p->remote_atomic_flags = 0; + } + break; + + case IB_QPS_RTR: + /* VAPI doesn't support modifying the WQE depth ever. 
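+	 * IB_MOD_QP_SQ_DEPTH / IB_MOD_QP_RQ_DEPTH therefore fail with
+	 * IB_UNSUPPORTED instead of being silently ignored.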
*/ + if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + + *attr_mask_p |= QP_ATTR_RQ_PSN | + QP_ATTR_DEST_QP_NUM | + QP_ATTR_QP_OUS_RD_ATOM | + QP_ATTR_MIN_RNR_TIMER | + QP_ATTR_AV ; + + qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn); + qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp); + qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res; + + if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) + { + qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout; + } else + { + qp_attr_p->min_rnr_timer = 0; + } + +#if 1 + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n", + qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp)); +#endif + + // Convert primary RC AV (mandatory) + cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av); + + if (IB_QPT_RELIABLE_CONN == qp_type) + { + *attr_mask_p |= QP_ATTR_PATH_MTU; + qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU + + qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv + qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; + } + + // Convert Remote Atomic Flags + if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl); + } + + // Convert alternate RC AV + if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) + { + *attr_mask_p |= QP_ATTR_ALT_PATH; + cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av); + + if (IB_QPT_RELIABLE_CONN == qp_type) + { + qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv +#if 0 + /* Incompliant with spec 1.1! Data already set before */ + qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt; +#endif + } + } + break; + + case IB_QPS_RTS: + /* VAPI doesn't support modifying the WQE depth ever. 
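+	 * (Same restriction as in the IB_QPS_RTR arm above.)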
*/ + if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + + *attr_mask_p |= QP_ATTR_SQ_PSN | + QP_ATTR_RETRY_COUNT | + QP_ATTR_RNR_RETRY | + QP_ATTR_OUS_DST_RD_ATOM | + QP_ATTR_MIN_RNR_TIMER; + + qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn); + + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl); + } + + qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv + qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth; + qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt; + qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout; + + // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS) + if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) { + *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM; + qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res; + } + + // Convert alternate RC AV + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) + { + *attr_mask_p |= QP_ATTR_ALT_PATH; + cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av); + if (IB_QPT_RELIABLE_CONN == qp_type) + { + qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv +#if 0 + /* Incompliant with spec 1.1! Data already set before */ + qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt; +#endif + } + } + break; + + // TBD: The following are treated equally (SQ Drain) + case IB_QPS_SQD: + case IB_QPS_SQD_DRAINING: + case IB_QPS_SQD_DRAINED: + *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF; + qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event; + break; + + case IB_QPS_SQERR: + case IB_QPS_ERROR: + case IB_QPS_TIME_WAIT: + default: + break; + } + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p)); + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static VAPI_wr_opcode_t +map_ibal_send_opcode( + IN ib_wr_type_t ibal_opcode, + IN boolean_t imm) +{ + VAPI_wr_opcode_t vapi_opcode; + + switch (ibal_opcode) + { + case WR_SEND: vapi_opcode = VAPI_SEND; + break; + case WR_RDMA_WRITE: vapi_opcode = VAPI_RDMA_WRITE; + break; + case WR_RDMA_READ: vapi_opcode = VAPI_RDMA_READ; + break; + case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP; + break; + case WR_FETCH_ADD: vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD; + break; + default: vapi_opcode = VAPI_SEND; + break; + } + if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++; + return vapi_opcode; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_send_desc( + IN IB_ts_t transport, + IN const ib_send_wr_t *ibal_send_wqe_p, + OUT VAPI_sr_desc_t *vapi_send_desc_p) +{ + boolean_t imm = FALSE; + u_int32_t idx; + register VAPI_sg_lst_entry_t *sg_lst_p; + register ib_local_ds_t *ds_array; + + + switch (transport) + { + case 
IB_TS_UD: + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD")); + { + mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av; + + vapi_send_desc_p->remote_qp = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp); + vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey); + + if (!avo_p || avo_p->mark != E_MARK_AV) + return IB_INVALID_AV_HANDLE; + + vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul + break; + } + + case IB_TS_RC: + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC")); + // vapi_send_desc_p->remote_qp = 0; + // vapi_send_desc_p->remote_qkey = 0; + vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr; + vapi_send_desc_p->r_key = ibal_send_wqe_p->remote_ops.rkey; + vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1; + vapi_send_desc_p->swap = ibal_send_wqe_p->remote_ops.atomic2; + break; + + default: // TBD: RAW, RD + return IB_UNSUPPORTED; + } + + imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE)); + vapi_send_desc_p->fence = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE)); + vapi_send_desc_p->set_se = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED)); + vapi_send_desc_p->comp_type = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ? +VAPI_SIGNALED : VAPI_UNSIGNALED; + + vapi_send_desc_p->id = ibal_send_wqe_p->wr_id; + vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm); + + if (imm) + vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data); + + vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds; + + sg_lst_p = vapi_send_desc_p->sg_lst_p; + ds_array = ibal_send_wqe_p->ds_array; + for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++) + { + sg_lst_p->addr = ds_array->vaddr; + sg_lst_p->len = ds_array->length; + sg_lst_p->lkey = ds_array->lkey; + // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey)); + sg_lst_p++; + ds_array++; + } + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n", + vapi_send_desc_p->remote_qp, + vapi_send_desc_p->remote_qkey)); + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_recv_desc( + IN const ib_recv_wr_t *ibal_recv_wqe_p, + OUT VAPI_rr_desc_t *vapi_recv_desc_p) +{ + u_int32_t idx; + register VAPI_sg_lst_entry_t *sg_lst_p; + register ib_local_ds_t *ds_array; + + vapi_recv_desc_p->id = ibal_recv_wqe_p->wr_id; + vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds; + vapi_recv_desc_p->opcode = VAPI_RECEIVE; + vapi_recv_desc_p->comp_type = VAPI_SIGNALED; + + sg_lst_p = vapi_recv_desc_p->sg_lst_p; + ds_array = ibal_recv_wqe_p->ds_array; + for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++) + { + sg_lst_p->addr = ds_array->vaddr; + sg_lst_p->len = ds_array->length; + sg_lst_p->lkey = ds_array->lkey; + // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey)); + sg_lst_p++; + ds_array++; + } + + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mthca_port_cap_to_ibal( + IN IB_port_cap_mask_t mthca_port_cap, + OUT ib_port_cap_t *ibal_port_cap_p) +{ + if (mthca_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP) + ibal_port_cap_p->cm = TRUE; + if (mthca_port_cap & 
IB_CAP_MASK_IS_SNMP_TUNN_SUP) + ibal_port_cap_p->snmp = TRUE; + if (mthca_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP) + ibal_port_cap_p->dev_mgmt = TRUE; + if (mthca_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP) + ibal_port_cap_p->vend = TRUE; + if (mthca_port_cap & IB_CAP_MASK_IS_SM_DISABLED) + ibal_port_cap_p->sm_disable = TRUE; + if (mthca_port_cap & IB_CAP_MASK_IS_SM) + ibal_port_cap_p->sm = TRUE; +} + +///////////////////////////////////////////////////////// + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_get_hca_pkey_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table_p) +{ + u_int16_t size; + ib_net16_t *pkey_p; + + if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p)) + return IB_ERROR; + + pkey_p = (ib_net16_t *)table_p; +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1])); +#endif + return IB_SUCCESS; +} + +ib_api_status_t +mlnx_get_hca_gid_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table_p) +{ + u_int16_t size; + + if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p)) + return IB_ERROR; + + return IB_SUCCESS; +} + +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mthca_port_cap_to_ibal( + IN u32 mthca_port_cap, + OUT ib_port_cap_t *ibal_port_cap_p) +{ + if (mthca_port_cap & IB_PORT_CM_SUP) + ibal_port_cap_p->cm = TRUE; + if (mthca_port_cap & IB_PORT_SNMP_TUNNEL_SUP) + ibal_port_cap_p->snmp = TRUE; + if (mthca_port_cap & IB_PORT_DEVICE_MGMT_SUP) + ibal_port_cap_p->dev_mgmt = TRUE; + if (mthca_port_cap & IB_PORT_VENDOR_CLASS_SUP) + ibal_port_cap_p->vend = TRUE; + if (mthca_port_cap & IB_PORT_SM_DISABLED) + ibal_port_cap_p->sm_disable = TRUE; + if (mthca_port_cap & IB_PORT_SM) + ibal_port_cap_p->sm = TRUE; +} + + +///////////////////////////////////////////////////////// +void +mlnx_conv_hca_cap( + IN struct ib_device *ib_dev, + IN struct ib_device_attr *hca_info_p, + IN struct ib_port_attr *hca_ports, + OUT ib_ca_attr_t *ca_attr_p) +{ + u_int8_t port_num; + ib_port_attr_t *ibal_port_p; + struct ib_port_attr *mthca_port_p; + + ca_attr_p->vend_id = hca_info_p->vendor_id; + ca_attr_p->dev_id = (uint16_t)hca_info_p->vendor_part_id; + ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver; + //TODO: convert guid ? 
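+	/*
+	 * Illustrative note (not part of the original source): node_guid is
+	 * kept in network byte order, so the raw 8-byte copy below assumes
+	 * ib_ca_attr_t also expects the GUID in network order.  If a
+	 * host-order value were wanted instead, the conversion hinted at by
+	 * the TODO above would be something like (sketch):
+	 *
+	 *	ca_attr_p->ca_guid = cl_ntoh64(
+	 *		*(UNALIGNED64 u_int64_t *)&hca_info_p->node_guid );
+	 */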
+	ca_attr_p->ca_guid = *(UNALIGNED64 u_int64_t *)&hca_info_p->node_guid;
+	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
+	ca_attr_p->max_qps = hca_info_p->max_qp;
+	ca_attr_p->max_wrs = hca_info_p->max_qp_wr;
+	ca_attr_p->max_sges = hca_info_p->max_sge;
+	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
+	ca_attr_p->max_cqs = hca_info_p->max_cq;
+	ca_attr_p->max_cqes = hca_info_p->max_cqe;
+	ca_attr_p->max_pds = hca_info_p->max_pd;
+	ca_attr_p->init_regions = hca_info_p->max_mr;
+	ca_attr_p->init_windows = hca_info_p->max_mw;
+	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
+	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
+	ca_attr_p->atomicity = hca_info_p->atomic_cap;
+	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
+	ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
+	ca_attr_p->max_resp_res = (uint8_t)hca_info_p->max_res_rd_atom;
+	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
+	ca_attr_p->max_ipv6_qps = hca_info_p->max_raw_ipv6_qp;
+	ca_attr_p->max_ether_qps = hca_info_p->max_raw_ethy_qp;
+	ca_attr_p->max_mcast_grps = hca_info_p->max_mcast_grp;
+	ca_attr_p->max_mcast_qps = hca_info_p->max_total_mcast_qp_attach;
+	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
+	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
+	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
+	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
+	ca_attr_p->raw_mcast_support = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
+	ca_attr_p->apm_support = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
+	ca_attr_p->av_port_check = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
+	ca_attr_p->change_primary_port = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
+	ca_attr_p->modify_wr_depth = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
+	ca_attr_p->hw_agents = FALSE; // in the context of IBAL, the agent is implemented on the host
+
+	ca_attr_p->num_page_sizes = 1;
+	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap
+
+	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
+	{
+		// Setup port pointers
+		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
+		mthca_port_p = &hca_ports[port_num];
+
+		// Port Capabilities
+		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
+		mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);
+
+		// Port Attributes
+		ibal_port_p->port_num = port_num + start_port(ib_dev);
+		ibal_port_p->port_guid = ibal_port_p->p_gid_table[0].unicast.interface_id;
+		ibal_port_p->lid = cl_ntoh16(mthca_port_p->lid);
+		ibal_port_p->lmc = mthca_port_p->lmc;
+		ibal_port_p->max_vls = mthca_port_p->max_vl_num;
+		ibal_port_p->sm_lid = cl_ntoh16(mthca_port_p->sm_lid);
+		ibal_port_p->sm_sl = mthca_port_p->sm_sl;
+		ibal_port_p->link_state = (mthca_port_p->state != 0) ?
(uint8_t)mthca_port_p->state : IB_LINK_DOWN; + ibal_port_p->num_gids = (uint16_t)mthca_port_p->gid_tbl_len; + ibal_port_p->num_pkeys = mthca_port_p->pkey_tbl_len; + ibal_port_p->pkey_ctr = (uint16_t)mthca_port_p->bad_pkey_cntr; + ibal_port_p->qkey_ctr = (uint16_t)mthca_port_p->qkey_viol_cntr; + ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz; + ibal_port_p->mtu = (u_int8_t)mthca_port_p->max_mtu; + + ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout; + // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n", + ibal_port_p->port_num, ibal_port_p->port_guid)); +#endif + } +} + +void cq_comp_handler(struct ib_cq *cq, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + if (hob_p) + (hob_p->comp_cb_p)(cq->cq_context); + else { + HCA_TRACE (CL_DBG_ERROR, ("Incorrect context. Completion callback was not invoked\n")); + } +} + +void ca_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + + // prepare parameters + event_rec.context = (void *)hob_p->ca_context; + event_rec.trap.info.port_num = ev->element.port_num; + event_rec.type = ev->event; + if (event_rec.type > IB_AE_UNKNOWN) { + // CL_ASSERT(0); // This shouldn't happen + HCA_TRACE(HCA_DBG_ERROR, ("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n", + event_rec.type, IB_AE_LOCAL_FATAL)); + event_rec.type = IB_AE_LOCAL_FATAL; + } + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_TRACE (CL_DBG_ERROR, ("Incorrect context. Async callback was not invoked\n")); + } +} + +void qp_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_qp *qp_p; + + // prepare parameters + event_rec.type = ev->event; + qp_p = (struct mthca_qp *)ev->element.qp; + event_rec.context = qp_p->qp_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_TRACE (CL_DBG_ERROR, ("Incorrect context. Async callback was not invoked\n")); + } +} + +void cq_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_cq *cq_p; + + // prepare parameters + event_rec.type = ev->event; + cq_p = (struct mthca_cq *)ev->element.cq; + event_rec.context = cq_p->cq_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_TRACE (CL_DBG_ERROR, ("Incorrect context. Async callback was not invoked\n")); + } +} + + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_data.h b/branches/MTHCA/hw/mthca/kernel/hca_data.h new file mode 100644 index 00000000..14e65229 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_data.h @@ -0,0 +1,700 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_data.h 148 2005-07-12 07:48:46Z sleybo $ + */ + +#ifndef __HCA_DATA_H__ +#define __HCA_DATA_H__ + + +#include +#include +#include +#include + +#ifndef WIN_TO_BE_REMOVED + +//TODO: temp data type +// THH +#define HH_hca_hndl_t int +#define HH_hca_dev_t int +#define HH_cq_hndl_t int +#define HH_event_record_t int +#define HH_pd_hndl_t int +#define HH_mr_t int +#define HH_mr_info_t int +// HHUL +#define HHUL_pd_hndl_t int +#define HHUL_qp_hndl_t int +#define HHUL_cq_hndl_t int +#define HHUL_hca_hndl_t int +#define HHUL_ud_av_hndl_t int +#define HHUL_mw_bind_t int +#define HHUL_qp_init_attr_t int + +// VAPI +#define VAPI_sr_desc_t int +#define VAPI_rr_desc_t int +#define VAPI_sg_lst_entry_t int +#define VAPI_sg_lst_entry_t int +#define VAPI_mrw_acl_t int +#define VAPI_lkey_t int +#define VAPI_ud_av_t int +#define VAPI_mr_change_t int +#define VAPI_wc_status_t int +#define VAPI_cqe_opcode_t int +#define VAPI_remote_node_addr_type_t int +#define VAPI_qp_attr_t int +#define VAPI_qp_attr_mask_t int +#define VAPI_sr_desc_t int +#define VAPI_hca_cap_t int +#define VAPI_special_qp_t int +#define VAPI_hca_port_t int + +// MOSAL +#define MOSAL_protection_ctx_t int +#define MOSAL_mem_perm_t int +#define MOSAL_iobuf_t int +#define MT_size_t int + +//TODO: replace by u64 et al +typedef uint64_t u_int64_t; +typedef uint32_t u_int32_t; +typedef uint16_t u_int16_t; +typedef uint8_t u_int8_t; + +// taken from ib_defs.h +typedef u_int32_t IB_wqpn_t; /* Work QP number: Only 24 LSbits */ +typedef u_int8_t IB_port_t; +typedef u_int8_t IB_gid_t[16]; /* GID (aka IPv6) H-to-L (big) (network) endianess */ +typedef u_int32_t IB_ts_t; + + +#endif + +extern u_int32_t g_mlnx_dbg_lvl; +extern uint32_t g_sqp_max_avs; +extern char mlnx_uvp_lib_name[]; + +#define MLNX_DBG_INFO (1<<1) +#define MLNX_DBG_TRACE (1<<2) +#define MLNX_DBG_VERBOSE (1<<3) +// for data path debugging +#define MLNX_DBG_DIRECT (1<<4) +#define MLNX_DBG_QPN (1<<5) +#define MLNX_DBG_MEM (1<<6) + +#define MLNX_MAX_HCA 4 +#define MLNX_NUM_HOBKL MLNX_MAX_HCA +#define MLNX_NUM_HOBUL MLNX_MAX_HCA +#define MLNX_NUM_CB_THR 1 +#define MLNX_SIZE_CB_POOL 256 +#define MLNX_UAL_ALLOC_HCA_UL_RES 1 +#define MLNX_UAL_FREE_HCA_UL_RES 2 + + +// Defines for QP ops +#define MLNX_MAX_NUM_SGE 8 +#define MLNX_MAX_WRS_PER_CHAIN 4 + +#define MLNX_NUM_RESERVED_QPS 16 + +/* + * Completion model. + * 0: No DPC processor assignment + * 1: DPCs per-CQ, processor affinity set at CQ initialization time. + * 2: DPCs per-CQ, processor affinity set at runtime. + * 3: DPCs per-CQ, no processor affinity set. 
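+ *
+ * Illustrative note (not part of the original comment): with model 1 the
+ * affinity would be fixed once when the CQ's DPC is set up, e.g. (sketch,
+ * assuming a chosen processor number 'cpu' and a completion routine
+ * 'cq_dpc_routine' -- both names are hypothetical):
+ *
+ *	KeInitializeDpc( &cq_info_p->dpc, cq_dpc_routine, cq_info_p );
+ *	KeSetTargetProcessorDpc( &cq_info_p->dpc, (CCHAR)cpu );
+ *
+ * Model 3, selected below, leaves the DPC free to run on any processor.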
+ */ +#define MLNX_COMP_MODEL 3 + +#ifdef WIN_TO_BE_REMOVED +#define PD_HCA_FROM_HNDL(hndl) (((pd_info_t *)hndl)->hca_idx) +#define PD_NUM_FROM_HNDL(hndl) (((pd_info_t *)hndl)->pd_num) +#define CQ_HCA_FROM_HNDL(hndl) (((cq_info_t *)hndl)->hca_idx) +#define CQ_NUM_FROM_HNDL(hndl) (((cq_info_t *)hndl)->cq_num) +#define QP_HCA_FROM_HNDL(hndl) (((qp_info_t *)hndl)->hca_idx) +#define QP_NUM_FROM_HNDL(hndl) (((qp_info_t *)hndl)->qp_num) + +#define PD_HNDL_FROM_PD(pd_num) (&hobul_p->pd_info_tbl[pd_num]) +#define CQ_HNDL_FROM_CQ(cq_num) (&hobul_p->cq_info_tbl[cq_num]) +#define QP_HNDL_FROM_QP(qp_num) (&hobul_p->qp_info_tbl[qp_num]) +#else +// incorrect: #define HOBUL_FROM_PD(hndl) container_of(hndl, mlnx_hobul_t, pd_info_tbl) +#endif + +#ifdef _DEBUG_ +#define VALIDATE_INDEX(index, limit, error, label) \ + { \ + if (index >= limit) \ + { \ + status = error; \ + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); \ + goto label; \ + } \ + } +#else +#define VALIDATE_INDEX(index, limit, error, label) +#endif + + + +// Typedefs + +typedef enum { + E_EV_CA=1, + E_EV_QP, + E_EV_CQ, + E_EV_LAST +} ENUM_EVENT_CLASS; + +typedef enum { + E_MARK_CA=1, // Channel Adaptor + E_MARK_PD, // Protection Domain + E_MARK_CQ, // Completion Queue + E_MARK_QP, // Queue Pair + E_MARK_AV, // Address Vector (UD) + E_MARK_MG, // Multicast Group + E_MARK_MR, // Memory Region + E_MARK_MW, // Memory Windows + E_MARK_INVALID, +} ENUM_MARK; + +typedef enum { + E_MR_PHYS=1, + E_MR_SHARED, + E_MR_ANY, + E_MR_INVALID +} ENUM_MR_TYPE; + +/* + * Attribute cache for port info saved to expedite local MAD processing. + * Note that the cache accounts for the worst case GID and PKEY table size + * but is allocated from paged pool, so it's nothing to worry about. + */ + +typedef struct _guid_block +{ + boolean_t valid; + ib_guid_info_t tbl; + +} mlnx_guid_block_t; + +typedef struct _port_info_cache +{ + boolean_t valid; + ib_port_info_t info; + +} mlnx_port_info_cache_t; + +typedef struct _pkey_block +{ + boolean_t valid; + ib_pkey_table_info_t tbl; + +} mlnx_pkey_block_t; + +typedef struct _sl_vl_cache +{ + boolean_t valid; + ib_slvl_table_t tbl; + +} mlnx_sl_vl_cache_t; + +typedef struct _vl_arb_block +{ + boolean_t valid; + ib_vl_arb_table_t tbl; + +} mlnx_vl_arb_block_t; + +typedef struct _attr_cache +{ + mlnx_guid_block_t guid_block[32]; + mlnx_port_info_cache_t port_info; + mlnx_pkey_block_t pkey_tbl[2048]; + mlnx_sl_vl_cache_t sl_vl; + mlnx_vl_arb_block_t vl_arb[4]; + +} mlnx_cache_t; + +typedef struct _ib_ca { + ENUM_MARK mark; + ci_completion_cb_t comp_cb_p; + ci_async_event_cb_t async_cb_p; + const void *ca_context; + void *cl_device_h; + u_int32_t index; + cl_async_proc_t *async_proc_mgr_p; + mlnx_cache_t *cache; // Cached port attributes. + const void * __ptr64 p_dev_obj; // store underlying device object +} mlnx_hob_t; + +typedef struct _ib_um_ca +{ + MDL *p_mdl; + void *p_mapped_addr; + HH_hca_hndl_t hh_hndl; + HH_hca_dev_t dev_info; + uint8_t ul_hca_res[1]; // Beginning of UL resource buffer. 
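+	/*
+	 * Illustrative note (not part of the original source): ul_hca_res[1]
+	 * is the usual variable-length trailing-array idiom, so the structure
+	 * must be allocated oversized; a minimal sketch, assuming the UVP
+	 * reports its resource size in a variable 'res_sz':
+	 *
+	 *	p_um_ca = cl_zalloc(
+	 *		offsetof(mlnx_um_ca_t, ul_hca_res) + res_sz );
+	 */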
+
+} mlnx_um_ca_t;
+
+typedef struct {
+	cl_async_proc_item_t	async_item;
+	HH_hca_hndl_t	hh_hndl;
+	HH_cq_hndl_t	hh_cq;	// for completion
+	HH_event_record_t	hh_er;	// for async events
+	void	*private_data;
+} mlnx_cb_data_t;
+
+#ifdef WIN_TO_BE_REMOVED
+typedef struct _ib_pd {	/* struct of PD related resources */
+	ENUM_MARK	mark;
+	u_int32_t	kernel_mode;
+	cl_mutex_t	mutex;
+	atomic32_t	count;
+	u_int32_t	hca_idx;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HH_pd_hndl_t	pd_num;	/* For HH-UL direct access */
+	HHUL_pd_hndl_t	hhul_pd_hndl;
+	void	*pd_ul_resources_p;
+} pd_info_t;
+
+#else
+
+typedef struct _ib_pd {
+	struct ib_pd	ib_pd;
+} pd_info_t;
+
+#endif
+
+#ifdef WIN_TO_BE_REMOVED
+typedef struct _ib_cq {	/* struct of CQ related resources */
+	ENUM_MARK	mark;
+	cl_mutex_t	mutex;
+	u_int32_t	hca_idx;
+	u_int32_t	kernel_mode;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HH_cq_hndl_t	cq_num;	/* For HH-UL direct access */
+//	HH_pd_hndl_t	pd_num;	/* For HH-UL direct access */
+	HHUL_cq_hndl_t	hhul_cq_hndl;
+	void	*cq_ul_resources_p;
+	const void	*cq_context;
+	KDPC	dpc;
+	atomic32_t	spl_qp_cnt;
+
+} cq_info_t;
+#else
+
+typedef struct _ib_cq {
+	struct ib_cq	ibcq;
+} cq_info_t;
+
+#endif
+
+#ifdef WIN_TO_BE_REMOVED
+typedef struct _ib_qp {
+	ENUM_MARK	mark;
+	cl_mutex_t	mutex;
+	u_int32_t	hca_idx;
+	u_int32_t	kernel_mode;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	// For HH direct access
+	HHUL_qp_hndl_t	hhul_qp_hndl;
+	IB_wqpn_t	qp_num;	// For direct HH-UL access
+	HH_pd_hndl_t	pd_num;	// For HH-UL direct access
+	IB_port_t	port;	// Valid for special QPs only
+	ib_qp_type_t	qp_type;	// Required for qp_query
+	u_int32_t	sq_signaled;	// Required for qp_query
+	ib_cq_handle_t	h_sq_cq;
+	ib_cq_handle_t	h_rq_cq;
+	u_int32_t	sq_size;
+	u_int32_t	rq_size;
+	VAPI_sr_desc_t	*send_desc_p;
+	VAPI_rr_desc_t	*recv_desc_p;
+	VAPI_sg_lst_entry_t	*send_sge_p;
+	VAPI_sg_lst_entry_t	*recv_sge_p;
+	void	*qp_ul_resources_p;
+	const void	*qp_context;
+} qp_info_t;
+#else
+
+typedef struct _ib_qp {
+	// must be the first
+	struct ib_qp	ibqp;
+} qp_info_t;
+
+#endif
+
+typedef struct HOBUL_t {
+	int	dummy;
+#ifdef WIN_TO_BE_REMOVED
+	pd_info_t	*pd_info_tbl;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HHUL_hca_hndl_t	hhul_hndl;	/* user level HCA resources handle for HH */
+	u_int32_t	cq_idx_mask;	/* */
+	u_int32_t	qp_idx_mask;	/* */
+	u_int32_t	vendor_id;	/* \ */
+	u_int32_t	device_id;	/* > 3 items needed for initializing user level */
+	void	*hca_ul_resources_p;	/* / */
+	MT_size_t	cq_ul_resources_sz;	/* Needed for allocating user resources for CQs */
+	MT_size_t	qp_ul_resources_sz;	/* Needed for allocating user resources for QPs */
+	MT_size_t	pd_ul_resources_sz;	/* Needed for allocating user resources for PDs */
+	u_int32_t	max_cq;	/* Max num. of CQs - size of following table */
+	cq_info_t	*cq_info_tbl;
+	u_int32_t	max_qp;	/* Max num. of QPs - size of following table */
+	qp_info_t	*qp_info_tbl;
+	u_int32_t	max_pd;	/* Max num.
of PDs - size of following table */ + u_int32_t log2_mpt_size; + atomic32_t count; +#endif +} mlnx_hobul_t, *mlnx_hobul_hndl_t; + +#ifdef WIN_TO_BE_REMOVED +typedef struct _ib_mr { + ENUM_MARK mark; + ENUM_MR_TYPE mr_type; + u_int64_t mr_start; // TBD: IA64 + u_int64_t mr_size; // TBD: IA64 +// u_int64_t mr_first_page_addr; // TBD : IA64 +// u_int32_t mr_num_pages; + ib_pd_handle_t mr_pd_handle; + MOSAL_iobuf_t mr_iobuf; + VAPI_mrw_acl_t mr_acl; + VAPI_lkey_t mr_lkey; + MOSAL_protection_ctx_t mr_prot_ctx; + MOSAL_mem_perm_t mr_mosal_perm; +} mlnx_mro_t; +#else + +typedef struct _ib_mr { + struct ib_mr ib_mr; +} mlnx_mro_t; + +#endif +typedef struct _ib_mw { + ENUM_MARK mark; + u_int32_t hca_idx; + u_int32_t pd_idx; + u_int32_t mw_rkey; +} mlnx_mwo_t; + +typedef struct _ib_mcast { + ENUM_MARK mark; + IB_gid_t mcast_gid; + u_int32_t hca_idx; + u_int32_t qp_num; + u_int32_t kernel_mode; +} mlnx_mcast_t; + +typedef struct _ib_av { + ENUM_MARK mark; + u_int32_t hca_idx; + u_int32_t pd_idx; + u_int32_t user_mode; + HHUL_ud_av_hndl_t h_av; +} mlnx_avo_t; + +typedef struct { + cl_list_item_t list_item; // to include in the HCA chain + net64_t guid; // HCA node Guid + struct mthca_dev *mdev; // VP Driver device + u_int32_t hw_ver; // HCA HW version + mlnx_hob_t hob; // HOB - IBAL-related HCA resources + mlnx_hobul_t hobul; // HOBUL - - IBAL-related kernel client resources + +#ifdef WIN_TO_BE_REMOVED + // removed as it is found in p_ext->cl_ext.p_pdo + const void* __ptr64 p_dev_obj; // Driver PDO +#endif +} mlnx_hca_t; + + +typedef mlnx_hob_t *mlnx_hca_h; + +// Global Variables +//extern mlnx_hca_t mlnx_hca_array[]; +//extern uint32_t mlnx_num_hca; + +extern mlnx_hob_t mlnx_hob_array[]; +extern mlnx_hobul_t *mlnx_hobul_array[]; + +// Functions +void +setup_ci_interface( + IN const ib_net64_t ca_guid, + OUT ci_interface_t *p_interface ); + +void +mlnx_hca_insert( + IN mlnx_hca_t *p_hca ); + +void +mlnx_hca_remove( + IN mlnx_hca_t *p_hca ); + +mlnx_hca_t* +mlnx_hca_from_guid( + IN ib_net64_t guid ); + +/* +void +mlnx_names_from_guid( + IN ib_net64_t guid, + OUT char **hca_name_p, + OUT char **dev_name_p); +*/ + +cl_status_t +mlnx_hcas_init( void ); + +cl_status_t +mlnx_hobs_init( void ); + +ib_api_status_t +mlnx_hobs_insert( + IN mlnx_hca_t *p_hca, + OUT mlnx_hob_t **hob_p); + +void +mlnx_hobs_get_handle( + IN mlnx_hob_t *hob_p, + OUT HH_hca_hndl_t *hndl_p); + +ib_api_status_t +mlnx_hobs_set_cb( + IN mlnx_hob_t *hob_p, + IN ci_completion_cb_t comp_cb_p, + IN ci_async_event_cb_t async_cb_p, + IN const void* const ib_context); + +ib_api_status_t +mlnx_hobs_get_context( + IN mlnx_hob_t *hob_p, + OUT void **context_p); + +ib_api_status_t +mlnx_hobs_create_device( + IN mlnx_hob_t *hob_p, + OUT char* dev_name); + +void +mlnx_hobs_remove( + IN mlnx_hob_t *hob_p); + +ib_api_status_t +mlnx_hobs_lookup( + IN HH_hca_hndl_t hndl, + OUT mlnx_hob_t **hca_p); + +mlnx_hobul_t * +mlnx_hobs_get_hobul( + IN mlnx_hob_t *hob_p); + +ib_api_status_t +mlnx_hobul_new( + IN mlnx_hob_t *hob_p, + IN HH_hca_hndl_t hh_hndl, + IN void *resources_p); + +void +mlnx_hobul_get( + IN mlnx_hob_t *hob_p, + OUT void **resources_p ); + +void +mlnx_hobul_delete( + IN mlnx_hob_t *hob_p); + +// Conversion Functions + +VAPI_mrw_acl_t +map_ibal_acl( + IN ib_access_t ibal_acl); + +ib_access_t +map_vapi_acl( + IN VAPI_mrw_acl_t vapi_acl); + +ib_api_status_t +mlnx_lock_region( + IN mlnx_mro_t *mro_p, + IN boolean_t um_call ); + +ib_api_status_t +mlnx_conv_ibal_mr_create( + IN u_int32_t pd_idx, + IN OUT mlnx_mro_t *mro_p, + IN VAPI_mr_change_t 
change_flags, + IN ib_mr_create_t const *p_mr_create, + IN boolean_t um_call, + OUT HH_mr_t *mr_props_p ); + +ib_api_status_t +mlnx_conv_ibal_pmr_create( + IN u_int32_t pd_idx, + IN mlnx_mro_t *mro_p, + IN ib_phys_create_t const *p_pmr_create, + OUT HH_mr_t *mr_props_p ); + +void +mlnx_conv_ibal_av( + IN HH_hca_hndl_t hh_hndl, + IN const ib_av_attr_t *ibal_av_p, + OUT VAPI_ud_av_t *vapi_av_p); + +void +mlnx_conv_vapi_av( + IN HH_hca_hndl_t hh_hndl, + IN const VAPI_ud_av_t *vapi_av_p, + OUT ib_av_attr_t *ibal_av_p); + +int +mlnx_map_vapi_cqe_status( + IN VAPI_wc_status_t vapi_status); + +int +mlnx_map_vapi_cqe_type( + IN VAPI_cqe_opcode_t opcode); + +int +mlnx_map_vapi_rna_type( + IN VAPI_remote_node_addr_type_t rna); + +void +mlnx_conv_vapi_mr_attr( + IN ib_pd_handle_t pd_h, + IN HH_mr_info_t *mr_info_p, + OUT ib_mr_attr_t *mr_query_p); + +void +mlnx_conv_bind_req( + IN HHUL_qp_hndl_t hhul_qp_hndl, + IN ib_bind_wr_t* const p_mw_bind, + OUT HHUL_mw_bind_t *bind_prop_p); + +int +mlnx_map_ibal_qp_type( + IN ib_qp_type_t ibal_qpt, + OUT VAPI_special_qp_t *vapi_qp_type_p); + +void +mlnx_conv_qp_create_attr( + IN const ib_qp_create_t *create_attr_p, + IN HHUL_qp_init_attr_t *init_attr_p, + OUT VAPI_special_qp_t *vapi_qp_type_p); + +void +mlnx_conv_vapi_qp_attr( + IN HH_hca_hndl_t hh_hndl, + IN VAPI_qp_attr_t *hh_qp_attr_p, + OUT ib_qp_attr_t *qp_attr_p); + +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN HH_hca_hndl_t hh_hndl, + IN ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT VAPI_qp_attr_t *qp_attr_p, + OUT VAPI_qp_attr_mask_t *attr_mask_p); + +ib_api_status_t +mlnx_conv_send_desc( + IN IB_ts_t transport, + IN const ib_send_wr_t *ibal_send_wqe_p, + OUT VAPI_sr_desc_t *vapi_send_desc_p); + +ib_api_status_t +mlnx_conv_recv_desc( + IN const ib_recv_wr_t *ibal_recv_wqe_p, + OUT VAPI_rr_desc_t *vapi_recv_desc_p); + +void +mlnx_conv_hca_cap( + IN struct ib_device *ib_dev, + IN struct ib_device_attr *hca_info_p, + IN struct ib_port_attr *hca_ports, + OUT ib_ca_attr_t *ca_attr_p); + +ib_api_status_t +mlnx_get_hca_pkey_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table); + +ib_api_status_t +mlnx_get_hca_gid_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table); + +ib_api_status_t +mlnx_local_mad ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ); + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_ecc_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ); + +ib_api_status_t +fw_access_ctrl( + IN const void* __ptr64 context, + IN const void* __ptr64* const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL); + +void cq_comp_handler(struct ib_cq *cq, void *context); +void ca_event_handler(struct ib_event *ev, void *context); +void qp_event_handler(struct ib_event *ev, void *context); +void cq_event_handler(struct ib_event *ev, void *context); + + +#endif diff --git a/branches/MTHCA/hw/mthca/kernel/hca_debug.h b/branches/MTHCA/hw/mthca/kernel/hca_debug.h new file mode 100644 index 00000000..b6871b53 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_debug.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_debug.h 46 2005-05-30 17:55:53Z sleybo $ + */ + + +#if !defined( _HCA_DEBUG_H_ ) +#define _HCA_DEBUG_H_ + +#include + + +#define HCA_DBG_DEV (1 << 0) +#define HCA_DBG_PNP (1 << 1) +#define HCA_DBG_PO (1 << 2) +#define HCA_DBG_WARN (1 << 30) + +#define HCA_DBG_ERROR CL_DBG_ERROR +#define HCA_DBG_FULL CL_DBG_ALL + + +extern uint32_t g_mlnx_dbg_lvl; + + +#define HCA_ENTER( msg_lvl ) \ + CL_ENTER( msg_lvl, g_mlnx_dbg_lvl ) + +#define HCA_EXIT( msg_lvl ) \ + CL_EXIT( msg_lvl, g_mlnx_dbg_lvl ) + +#define HCA_TRACE( msg_lvl, msg ) \ + CL_TRACE( (msg_lvl), g_mlnx_dbg_lvl, msg ) + +#define HCA_TRACE_ERR( msg_lvl, msg ) \ + if ( status != IB_SUCCESS) \ + CL_TRACE( (msg_lvl), g_mlnx_dbg_lvl, msg ) + +#define HCA_TRACE_EXIT( msg_lvl, msg ) \ + CL_TRACE_EXIT( msg_lvl, g_mlnx_dbg_lvl, msg ) + +#define HCA_PRINT( msg_lvl, msg ) \ + CL_PRINT( msg_lvl, g_mlnx_dbg_lvl, msg ) + +#endif /* !defined( _HCA_DEBUG_H_ ) */ + + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_direct.c b/branches/MTHCA/hw/mthca/kernel/hca_direct.c new file mode 100644 index 00000000..4dbc5bd1 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_direct.c @@ -0,0 +1,566 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: hca_direct.c 148 2005-07-12 07:48:46Z sleybo $ + */ + + +#include "hca_data.h" + + +/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. */ +#define MLNX_SEND_NATIVE 1 +#define MLNX_RECV_NATIVE 1 +#define MLNX_POLL_NATIVE 1 + + +/* +* Work Request Processing Verbs. +*/ +ib_api_status_t +mlnx_post_send ( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t *p_send_wr, + OUT ib_send_wr_t **pp_failed ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_qp); + UNREFERENCED_PARAMETER(p_send_wr); + UNREFERENCED_PARAMETER(pp_failed); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_post_send not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_SUCCESS; + qp_info_t *qp_info_p = (qp_info_t *)h_qp; + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; + + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", qp_info_p->hca_idx, qp_info_p->qp_num)); + if( !p_send_wr ) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!qp_info_p || E_MARK_QP != qp_info_p->mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[qp_info_p->hca_idx]; + + CL_ASSERT(hobul_p); + CL_ASSERT(hobul_p->qp_info_tbl); + + qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#ifdef WIN_TO_BE_REMOVED + return THHUL_qpm_post_send_wrs( hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, p_send_wr, pp_failed ); +#else + // int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr); + //dev->ib_dev.post_send = mthca_tavor_post_send; +#endif + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + + +ib_api_status_t +mlnx_post_recv ( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed OPTIONAL ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_qp); + UNREFERENCED_PARAMETER(p_recv_wr); + UNREFERENCED_PARAMETER(pp_failed); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_post_recv not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_SUCCESS; + qp_info_t *qp_info_p = (qp_info_t *)h_qp; + + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; +#if !MLNX_RECV_NATIVE + HH_ret_t ret; + ib_recv_wr_t *wqe_p; + IB_ts_t transport; + VAPI_rr_desc_t recv_desc; + VAPI_special_qp_t vapi_qp_type; +#endif + + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", + // qp_info_p->hca_idx, qp_info_p->qp_num)); + if( !p_recv_wr ) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!qp_info_p || E_MARK_QP != qp_info_p->mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[qp_info_p->hca_idx]; + + CL_ASSERT(hobul_p); + CL_ASSERT(hobul_p->qp_info_tbl); + + qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#if MLNX_RECV_NATIVE + return THHUL_qpm_post_recv_wrs( hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl, + p_recv_wr, pp_failed ); +#else + // Assuming that posting all WQE will succeed. Errors are handled below. + *pp_failed = NULL; + + // Loop and post all descriptors in list, bail out on failure. 
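+	// Illustrative note (not part of the original source): the regular-QP
+	// branch below gathers up to MLNX_MAX_WRS_PER_CHAIN (4) descriptors
+	// and posts them with a single THHUL_qpm_post_recv_reqs call, on the
+	// assumption that one multi-WR call is cheaper than one call per WR.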
+ transport = mlnx_map_ibal_qp_type(qp_info_p->qp_type, &vapi_qp_type); + + if (VAPI_REGULAR_QP != vapi_qp_type) + { + memset(&recv_desc, 0, sizeof(recv_desc)); + recv_desc.sg_lst_p = hobul_p->qp_info_tbl[qp_idx].recv_sge_p; + for (wqe_p = p_recv_wr; wqe_p; wqe_p = wqe_p->p_next) + { + // rq_size is a misnomer, it is really max_sge + CL_ASSERT( hobul_p->qp_info_tbl[qp_idx].rq_size >= wqe_p->num_ds); + + mlnx_conv_recv_desc(wqe_p, &recv_desc); + if (HH_OK != (ret = THHUL_qpm_post_recv_req(hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl, &recv_desc))) + { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE : + IB_ERROR; + + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } + } + else { + // For regular QP use real send multiple + VAPI_rr_desc_t desc_list[MLNX_MAX_WRS_PER_CHAIN]; + VAPI_sg_lst_entry_t sg_list[MLNX_MAX_WRS_PER_CHAIN][MLNX_MAX_NUM_SGE]; + u_int32_t num_wrs; + + wqe_p = p_recv_wr; + while (wqe_p) { + for (num_wrs = 0; (num_wrs < MLNX_MAX_WRS_PER_CHAIN) && wqe_p; + wqe_p = wqe_p->p_next, num_wrs++) + { + desc_list [num_wrs].sg_lst_p = &sg_list [num_wrs][0]; + status = mlnx_conv_recv_desc(wqe_p, &desc_list[num_wrs]); + if (status != IB_SUCCESS) { + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, + ("FAILED to map the recv_desc %d\n", num_wrs)); + break; + } + } + // CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("num_wrs %d\n", num_wrs)); + if (num_wrs > 0) { + if (num_wrs > 1) { + ret = THHUL_qpm_post_recv_reqs (hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + num_wrs, desc_list); + } else { + ret = THHUL_qpm_post_recv_req (hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + desc_list); + } + if (HH_OK != ret) { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE : + IB_ERROR; + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } else { + /* no work requests this round */ + CL_TRACE (MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("NO WRs\n")); + *pp_failed = wqe_p; + break; + } + } + } + + return status; +#endif + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + +/* +* Completion Processing and Completion Notification Request Verbs. 
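+*
+* Illustrative note (not part of the original comment): IBAL consumers
+* normally poll the CQ until it is empty, rearm it with enable_cq_notify,
+* and then poll once more, since a completion that lands between the last
+* poll and the rearm would otherwise never raise the callback.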
+*/ + +ib_api_status_t +mlnx_peek_cq( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_n_cqes ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(p_n_cqes); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_peek_cq not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + status = THHUL_cqm_count_cqe( + hobul_p->hhul_hndl, hhul_cq_hndl, p_n_cqes ); + if( status != IB_SUCCESS ) + goto cleanup; + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + +ib_api_status_t +mlnx_poll_cq ( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(pp_free_wclist); + UNREFERENCED_PARAMETER(pp_done_wclist); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_poll_cq not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; +#if !MLNX_POLL_NATIVE + HH_ret_t ret; + VAPI_wc_desc_t comp_desc; + ib_wc_t *wc_p; +#endif + + if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + return THHUL_cqm_poll4wc(hobul_p->hhul_hndl, hhul_cq_hndl, + pp_free_wclist, pp_done_wclist ); + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + +ib_api_status_t +mlnx_enable_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(solicited); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_enable_cq_notify not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + 
mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + VAPI_cq_notif_type_t hh_request; + + hh_request = (solicited) ? + VAPI_SOLIC_COMP: /* Notify on solicited completion event only */ + VAPI_NEXT_COMP; /* Notify on next completion */ + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if (HH_OK != THHUL_cqm_req_comp_notif(hobul_p->hhul_hndl, hhul_cq_hndl, hh_request)) + { + status = IB_ERROR; + goto cleanup; + } + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(n_cqes); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_enable_ncomp_cq_notify not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if (HH_OK != THHUL_cqm_req_ncomp_notif( + hobul_p->hhul_hndl, hhul_cq_hndl, n_cqes )) + { + status = IB_ERROR; + goto cleanup; + } + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +#endif +} + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* const p_mw_bind, + OUT net32_t* const p_rkey ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mw); + UNREFERENCED_PARAMETER(h_qp); + UNREFERENCED_PARAMETER(p_mw_bind); + UNREFERENCED_PARAMETER(p_rkey); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_bind_mw not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = QP_HCA_FROM_HNDL(h_qp); + u_int32_t qp_num = QP_NUM_FROM_HNDL(h_qp); + u_int32_t qp_idx = 0; + u_int32_t new_key; + mlnx_hobul_t *hobul_p; + mlnx_mwo_t *mwo_p; + HHUL_qp_hndl_t hhul_qp_hndl; + HHUL_mw_bind_t bind_props; + + // CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + if (!p_rkey) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + 
VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->qp_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_idx 0x%x\n", __LINE__, qp_idx)); + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_num 0x%x valid %d\n", + __LINE__, + hobul_p->qp_info_tbl[qp_idx].qp_num, + E_MARK_QP == hobul_p->qp_info_tbl[qp_idx].mark)); +#endif + if (hobul_p->qp_info_tbl[qp_idx].qp_num != qp_num || + E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + /* Trap the RKEY passed in not matching. */ + if ( cl_ntoh32( p_mw_bind->current_rkey ) != mwo_p->mw_rkey ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl; + + mlnx_conv_bind_req(hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl, p_mw_bind, &bind_props); + + // Binding a window to zero length is in fact an unbinding + // IF unbinding, window rkey remains the same. + // IF binding, new r_key tag is the previous tag incremented by 1: + new_key = mwo_p->mw_rkey; + if( bind_props.size > 0 ) { + new_key += (1 << hobul_p->log2_mpt_size); + } + + if (HH_OK != THHUL_qpm_post_bind_req(&bind_props, new_key)) + { + status = IB_ERROR; + goto cleanup; + } + + *p_rkey = cl_hton32( new_key ); + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->post_send = mlnx_post_send; + p_interface->post_recv = mlnx_post_recv; + + p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify; + p_interface->peek_cq = mlnx_peek_cq; + p_interface->poll_cq = mlnx_poll_cq; + p_interface->enable_cq_notify = mlnx_enable_cq_notify; + + p_interface->bind_mw = mlnx_bind_mw; +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_driver.c b/branches/MTHCA/hw/mthca/kernel/hca_driver.c new file mode 100644 index 00000000..bc73dc1c --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_driver.c @@ -0,0 +1,1748 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_driver.c 46 2005-05-30 17:55:53Z sleybo $ + */ + + +/* + * Provides the driver entry points for the Tavor VPD. + */ + + +#include "hca_driver.h" +#include +#include +#pragma warning( push, 3 ) +//#include "MdCard.h" +#pragma warning( pop ) +#include + + +/* + * UVP name does not include file extension. For debug builds, UAL + * will append "d.dll". For release builds, UAL will append ".dll" + */ +char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mt23108u"}; + + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ); + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_Param_Path ); + +static void +hca_drv_unload( + IN PDRIVER_OBJECT p_driver_obj ); + +static NTSTATUS +hca_sysctl( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ); + +#ifdef WIN_TO_BE_REMOVED + +static NTSTATUS +hca_add_device( + IN PDRIVER_OBJECT p_driver_obj, + IN PDEVICE_OBJECT p_pdo ); +// +//static NTSTATUS +//hca_enable( +// IN DEVICE_OBJECT* const p_dev_obj ); +// +//static NTSTATUS +//hca_disable( +// IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +//static NTSTATUS +//hca_deactivate( +// IN DEVICE_OBJECT* const p_dev_obj, +// IN IRP* const p_irp, +// OUT cl_irp_action_t* const p_action ); +// +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__get_ci_interface( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +__get_hca_handle( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *p_dev_obj ); + +#endif +//static void +//__work_item_pnp_cb( +// IN DEVICE_OBJECT *p_dev_obj, +// IN hca_work_item_context_t *p_context ); + +static NTSTATUS +__pnp_notify_target( + IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +__pnp_notify_ifc( + IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_get_pci_bus_interface( + IN DEVICE_OBJECT *p_dev_obj, + OUT BUS_INTERFACE_STANDARD *p_BusInterface ); + +static NTSTATUS +fw_flash_write_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_read_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_get_ca_guid( + IN DEVICE_OBJECT *p_dev_obj, + OUT uint64_t *ca_guid ); + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data); + +static NTSTATUS 
+fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len); +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ); + +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (INIT, DriverEntry) +#pragma alloc_text (INIT, __read_registry) +#pragma alloc_text (PAGE, hca_drv_unload) +#pragma alloc_text (PAGE, hca_sysctl) +#ifdef WIN_TO_BE_REMOVED +#pragma alloc_text (PAGE, hca_add_device) +#pragma alloc_text (PAGE, hca_start) +//#pragma alloc_text (PAGE, hca_deactivate) +//#pragma alloc_text (PAGE, hca_enable) +//#pragma alloc_text (PAGE, hca_disable) +#pragma alloc_text (PAGE, hca_release_resources) +#pragma alloc_text (PAGE, hca_query_bus_relations) +#pragma alloc_text (PAGE, hca_set_power) +#pragma alloc_text (PAGE, __alloc_hca_ifc) +#pragma alloc_text (PAGE, __get_ci_interface) +#pragma alloc_text (PAGE, __get_hca_handle) +#pragma alloc_text (PAGE, __hca_register) +//#pragma alloc_text (PAGE, __work_item_pnp_cb) +#pragma alloc_text (PAGE, __pnp_notify_target) +#pragma alloc_text (PAGE, __pnp_notify_ifc) +#endif +#pragma alloc_text (PAGE, fw_flash_get_ca_guid) +#endif + +#ifdef WIN_TO_BE_REMOVED +static const cl_vfptr_pnp_po_t hca_vfptr_pnp = { + "Tavor HCA VPD", + hca_start, /* StartDevice */ + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + cl_irp_skip, /* QueryRemove */ + hca_release_resources, + cl_do_remove, /* Remove */ + cl_irp_skip, /* CancelRemove */ + cl_irp_skip, /* SurpriseRemove */ + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + hca_query_bus_relations, + cl_irp_ignore, + cl_irp_skip, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, /* QueryPower */ + hca_set_power, /* SetPower */ + cl_irp_ignore, /* PowerSequence */ + cl_irp_ignore /* WaitWake */ +}; + +#endif + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ) +{ + NTSTATUS status; + cl_status_t cl_status; + + HCA_ENTER( HCA_DBG_DEV ); + + status = __read_registry( p_registry_path ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("__read_registry_path returned 0x%X.\n", status) ); + return status; + } + + /* Initialize Adapter DB */ + cl_status = mlnx_hcas_init(); + if( cl_status != CL_SUCCESS ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("mlnx_hcas_init returned %s.\n", cl_status_text[cl_status]) ); + return cl_to_ntstatus( cl_status ); + } +// cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) ); + + /*leo: init function table */ + hca_init_vfptr(); + + /*leo: calibrate CPU */ + MT_time_calibrate(); + + p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp; + p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power; + p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl; + p_driver_obj->DriverUnload = hca_drv_unload; + p_driver_obj->DriverExtension->AddDevice = hca_add_device; + + HCA_EXIT( HCA_DBG_DEV ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_registry_path ) +{ + NTSTATUS status; + /* Remember the terminating entry in the table below. 
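+	 * RtlQueryRegistryValues treats an entry whose QueryRoutine and Name
+	 * are both NULL as the end of the table, which is why table[] is
+	 * sized [2] for the single "DebugFlags" value: one live entry plus
+	 * the zeroed terminator produced by the cl_memclr below.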
*/
+	RTL_QUERY_REGISTRY_TABLE	table[2];
+	UNICODE_STRING				param_path;
+
+	HCA_ENTER( HCA_DBG_DEV );
+
+	RtlInitUnicodeString( &param_path, NULL );
+	param_path.MaximumLength = p_registry_path->Length +
+		sizeof(L"\\Parameters");
+	param_path.Buffer = cl_zalloc( param_path.MaximumLength );
+	if( !param_path.Buffer )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("Failed to allocate parameters path buffer.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	RtlAppendUnicodeStringToString( &param_path, p_registry_path );
+	RtlAppendUnicodeToString( &param_path, L"\\Parameters" );
+
+	/*
+	 * Clear the table.  This clears all the query callback pointers,
+	 * and sets up the terminating table entry.
+	 */
+	cl_memclr( table, sizeof(table) );
+
+	/* Setup the table entries. */
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[0].Name = L"DebugFlags";
+	table[0].EntryContext = &g_mlnx_dbg_lvl;
+	table[0].DefaultType = REG_DWORD;
+	table[0].DefaultData = &g_mlnx_dbg_lvl;
+	table[0].DefaultLength = sizeof(ULONG);
+
+	/* Have at it! */
+	status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE,
+		param_path.Buffer, table, NULL, NULL );
+
+	cl_free( param_path.Buffer );
+	HCA_EXIT( HCA_DBG_DEV );
+	return status;
+}
+
+
+static void
+hca_drv_unload(
+	IN				PDRIVER_OBJECT			p_driver_obj )
+{
+	HCA_ENTER( HCA_DBG_DEV );
+
+	UNUSED_PARAM( p_driver_obj );
+
+	HCA_EXIT( HCA_DBG_DEV );
+}
+
+
+static NTSTATUS
+hca_sysctl(
+	IN				PDEVICE_OBJECT			p_dev_obj,
+	IN				PIRP					p_irp )
+{
+	NTSTATUS		status;
+	hca_dev_ext_t	*p_ext;
+
+	HCA_ENTER( HCA_DBG_DEV );
+
+	p_ext = p_dev_obj->DeviceExtension;
+
+	IoSkipCurrentIrpStackLocation( p_irp );
+	status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );
+
+	HCA_EXIT( HCA_DBG_DEV );
+	return status;
+}
+
+#ifdef WIN_TO_BE_REMOVED
+
+static NTSTATUS
+hca_add_device(
+	IN				PDRIVER_OBJECT			p_driver_obj,
+	IN				PDEVICE_OBJECT			p_pdo )
+{
+	NTSTATUS		status;
+	DEVICE_OBJECT	*p_dev_obj, *p_next_do;
+	hca_dev_ext_t	*p_ext;
+	//cl_status_t		cl_status;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	/*
+	 * Create the device so that we have a device extension to store stuff in.
+	 */
+	status = IoCreateDevice( p_driver_obj, sizeof(hca_dev_ext_t),
+		NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,
+		FALSE, &p_dev_obj );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("IoCreateDevice returned 0x%08X.\n", status) );
+		return status;
+	}
+
+	p_ext = p_dev_obj->DeviceExtension;
+
+	//cl_status = cl_event_init( &p_ext->mutex, FALSE );
+	//if( cl_status != CL_SUCCESS )
+	//{
+	//	IoDeleteDevice( p_dev_obj );
+	//	HCA_TRACE_EXIT( HCA_DBG_ERROR,
+	//		("cl_mutex_init returned %s.\n", cl_status_text[status]) );
+	//	return cl_to_ntstatus( status );
+	//}
+	//cl_event_signal( &p_ext->mutex );
+
+	/* Attach to the device stack. */
+	p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo );
+	if( !p_next_do )
+	{
+		//cl_event_destroy( &p_ext->mutex );
+		IoDeleteDevice( p_dev_obj );
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("IoAttachDeviceToDeviceStack failed.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	/* Initialize the complib extension.
*/ + cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, g_mlnx_dbg_lvl, + &hca_vfptr_pnp, NULL ); + + p_ext->state = HCA_ADDED; + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +__get_ci_interface( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + NTSTATUS status; + IRP *p_irp; + hca_dev_ext_t *p_ext; + IO_STATUS_BLOCK io_status; + IO_STACK_LOCATION *p_io_stack; + KEVENT event; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Query for the verbs interface. */ + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev, + NULL, 0, NULL, &event, &io_status ); + if( !p_irp ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IoBuildSynchronousFsdRequest failed.\n") ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Format the IRP. */ + p_io_stack = IoGetNextIrpStackLocation( p_irp ); + p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE; + p_io_stack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION; + p_io_stack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t); + p_io_stack->Parameters.QueryInterface.Interface = + (INTERFACE*)&p_ext->ci_ifc; + p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + p_io_stack->Parameters.QueryInterface.InterfaceType = + &GUID_IB_CI_INTERFACE; + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( p_ext->p_al_dev, p_irp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = io_status.Status; + } + + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Query interface for verbs returned %08x.\n", status) ); + return status; + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +__get_hca_handle( + IN hca_dev_ext_t* const p_ext ) +{ +#ifdef LINUX_TO_BE_CHANGED + NTSTATUS status; + IRP *p_irp; + IO_STATUS_BLOCK io_status; + IO_STACK_LOCATION *p_io_stack; + KEVENT event; + + HCA_ENTER( HCA_DBG_PNP ); + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Query for the verbs interface. */ + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->cl_ext.p_next_do, + NULL, 0, NULL, &event, &io_status ); + if( !p_irp ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IoBuildSynchronousFsdRequest failed.\n") ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Format the IRP. */ + p_io_stack = IoGetNextIrpStackLocation( p_irp ); + p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE; + p_io_stack->Parameters.QueryInterface.Version = 1; + p_io_stack->Parameters.QueryInterface.Size = 0; + p_io_stack->Parameters.QueryInterface.Interface = NULL; + p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = + &p_ext->hca.hh_hndl; + p_io_stack->Parameters.QueryInterface.InterfaceType = + &GUID_MD_INTERFACE; + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. 
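+	   The same build/format/send/wait sequence also appears in
+	   __get_ci_interface above and fw_get_pci_bus_interface below. */
+
+#if 0
+	/*
+	 * Editor's sketch of a shared helper (hypothetical, not in the
+	 * original change) that could factor the pattern out:
+	 */
+	static NTSTATUS
+	__query_ifc_sync(
+		IN				DEVICE_OBJECT				*p_target,
+		IN		const	GUID						*p_guid,
+		IN				USHORT						version,
+		IN				USHORT						size,
+		OUT				INTERFACE					*p_ifc )
+	{
+		KEVENT				event;
+		IO_STATUS_BLOCK		io_status;
+		IRP					*p_irp;
+		IO_STACK_LOCATION	*p_io_stack;
+		NTSTATUS			status;
+
+		KeInitializeEvent( &event, NotificationEvent, FALSE );
+		p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_target,
+			NULL, 0, NULL, &event, &io_status );
+		if( !p_irp )
+			return STATUS_INSUFFICIENT_RESOURCES;
+
+		p_io_stack = IoGetNextIrpStackLocation( p_irp );
+		p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;
+		p_io_stack->Parameters.QueryInterface.InterfaceType = p_guid;
+		p_io_stack->Parameters.QueryInterface.Version = version;
+		p_io_stack->Parameters.QueryInterface.Size = size;
+		p_io_stack->Parameters.QueryInterface.Interface = p_ifc;
+		p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = NULL;
+		/* PnP IRPs must be initialized as not-supported. */
+		p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+		status = IoCallDriver( p_target, p_irp );
+		if( status == STATUS_PENDING )
+		{
+			KeWaitForSingleObject( &event, Executive, KernelMode,
+				FALSE, NULL );
+			status = io_status.Status;
+		}
+		return status;
+	}
+#endif
+
+	/* Send the IRP and collect the final status.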
*/ + status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = io_status.Status; + } + + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Query interface for HCA handle returned %08x.\n", status) ); + return status; + } + +HCA_EXIT( HCA_DBG_PNP ); +return status; + +#else + UNREFERENCED_PARAMETER(p_ext); + return 0; +#endif +} + + +static NTSTATUS +__pnp_notify_target( + IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify, + IN void *context ) +{ + NTSTATUS status = STATUS_SUCCESS; + DEVICE_OBJECT *p_dev_obj; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_dev_obj = context; + p_ext = p_dev_obj->DeviceExtension; + + if( IsEqualGUID( &p_notify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) ) + { + if( p_ext->state == HCA_REGISTERED ) + { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_STARTED; + } + + /* Release AL's file object so that it can unload. */ + CL_ASSERT( p_ext->p_al_file_obj ); + CL_ASSERT( p_ext->p_al_file_obj == p_notify->FileObject ); + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + else if( IsEqualGUID( &p_notify->Event, + &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) ) + { + if( p_ext->state == HCA_REGISTERED ) + { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_STARTED; + } + + /* Release AL's file object so that it can unload. */ + if( p_ext->p_al_file_obj ) + { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + + /* Cancel our target device change registration. */ + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + } + else if( IsEqualGUID( &p_notify->Event, + &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) ) + { + /* Cancel our target device change registration. */ + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + + /* Get the device object pointer for the AL. 
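+	   The cancelled remove means AL is staying, so re-acquire it. */
+
+	/*
+	 * Editor's summary of this callback, informational:
+	 *   QUERY_REMOVE     - drop the CI interface, release AL's file object
+	 *   REMOVE_COMPLETE  - likewise, then unregister this notification
+	 *   REMOVE_CANCELLED - unregister, re-acquire AL's device and file
+	 *                      object, re-register, then call __hca_register
+	 */
+
+	/* Recover AL's device object from the notification's file object.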
*/ + CL_ASSERT( !p_ext->p_al_file_obj ); + CL_ASSERT( !p_ext->p_al_dev ); + p_ext->p_al_file_obj = p_notify->FileObject; + p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj ); + + status = IoRegisterPlugPlayNotification( + EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, + p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, + &p_ext->pnp_target_entry ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IoRegisterPlugPlayNotification returned %08x.\n") ); + return status; + } + + __hca_register( p_dev_obj ); + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ) +{ + ci_interface_t *p_ifc; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ifc = ExAllocatePool( PagedPool, sizeof(ci_interface_t) ); + if( !p_ifc ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Failed to allocate ci_interface_t (%d bytes).\n", + sizeof(ci_interface_t)) ); + return NULL; + } + +#ifdef LINUX_TO_BE_CHANGED + setup_ci_interface( p_ext->hca.guid, p_ifc ); +#endif + + p_ifc->p_hca_dev = p_ext->cl_ext.p_pdo; +#ifdef LINUX_TO_BE_CHANGED + p_ifc->vend_id = p_ext->hca.hh_hndl->vendor_id; + p_ifc->dev_id = (uint16_t)p_ext->hca.hh_hndl->dev_id; + p_ifc->dev_revision = (uint16_t)p_ext->hca.hh_hndl->hw_ver; +#endif + HCA_EXIT( HCA_DBG_PNP ); + return p_ifc; +} + + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + NTSTATUS status; + ib_api_status_t ib_status; + ci_interface_t *p_hca_ifc; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + ASSERT( p_ext->state == HCA_STARTED ); + ASSERT( p_ext->p_al_dev ); + + /* Get the AL's lower interface. */ + status = __get_ci_interface( p_dev_obj ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("__get_ci_interface returned %08x.\n", status) ); + return status; + } + + /* Allocate and populate our HCA interface structure. */ + p_hca_ifc = __alloc_hca_ifc( p_ext ); + if( !p_hca_ifc ) + { + HCA_TRACE( HCA_DBG_ERROR, ("__alloc_hca_ifc failed.\n") ); + return STATUS_NO_MEMORY; + } + + /* Notify AL that we're available... */ + ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc ); + ExFreePool( p_hca_ifc ); + if( ib_status != IB_SUCCESS ) + { + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + p_ext->state = HCA_REGISTERED; + return STATUS_SUCCESS; +} + + +//static void +//__work_item_pnp_cb( +// IN DEVICE_OBJECT *p_dev_obj, +// IN hca_work_item_context_t *p_context ) +//{ +// hca_dev_ext_t *p_ext; +// NTSTATUS status; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); +// do +// { +// /* Check the state under protection of the mutex. */ +// if( p_ext->state != HCA_ADDED && +// p_ext->state != HCA_STARTED ) +// { +// HCA_TRACE( HCA_DBG_ERROR, ("Invalid state.\n") ); +// break; +// } +// +// ASSERT( !p_ext->p_al_dev ); +// +// /* Get the AL device object. */ +// HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") ); +// status = IoGetDeviceObjectPointer( &p_context->sym_link_name, +// FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); +// if( !NT_SUCCESS( status ) ) +// { +// HCA_TRACE( HCA_DBG_ERROR, +// ("IoGetDeviceObjectPointer returned %08x.\n", status) ); +// break; +// } +// +// cl_event_signal( &p_ext->mutex ); +// /* Register for removal notification of the IB Fabric root device. 
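+//	(Editor's note, informational: target-device-change registration
+//	needs the referenced FILE_OBJECT obtained above, and a successful
+//	registration must later be balanced by
+//	IoUnregisterPlugPlayNotification, as the live __pnp_notify_target
+//	path does.)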
*/ +// HCA_TRACE( HCA_DBG_PNP, +// ("Registering for target notifications.\n") ); +// status = IoRegisterPlugPlayNotification( +// EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, +// p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, +// &p_ext->pnp_target_entry ); +// cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); +// if( !NT_SUCCESS( status ) ) +// { +// ObDereferenceObject( p_ext->p_al_file_obj ); +// HCA_TRACE( HCA_DBG_ERROR, +// ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); +// break; +// } +// +// if( p_ext->state == HCA_STARTED ) +// { +// /* Queue the work item again to complete the registration. */ +// IoQueueWorkItem( p_context->p_item, __work_item_started_cb, +// DelayedWorkQueue, p_context->p_item ); +// } +// else +// { +// /* Free the work item. */ +// IoFreeWorkItem( p_context->p_item ); +// } +// } while( !p_ext ); +// +// cl_event_signal( &p_ext->mutex ); +// cl_free( p_context ); +// return; +//} + + +static NTSTATUS +__pnp_notify_ifc( + IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify, + IN void *context ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_dev_obj; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_dev_obj = context; + p_ext = p_dev_obj->DeviceExtension; + + if( !IsEqualGUID( &p_notify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) ) + { + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; + } + + /* + * Sanity check. We should only be getting notifications of the + * CI interface exported by AL. + */ + ASSERT( + IsEqualGUID( &p_notify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) ); + + if( p_ext->state != HCA_STARTED ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Invalid state: %d\n", p_ext->state) ); + return STATUS_SUCCESS; + } + + ASSERT( !p_ext->p_al_dev ); + ASSERT( !p_ext->p_al_file_obj ); + + /* Get the AL device object. */ + HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") ); + status = IoGetDeviceObjectPointer( p_notify->SymbolicLinkName, + FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("IoGetDeviceObjectPointer returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + /* Register for removal notification of the IB Fabric root device. */ + HCA_TRACE( HCA_DBG_PNP, + ("Registering for target notifications.\n") ); + status = IoRegisterPlugPlayNotification( + EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, + p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, + &p_ext->pnp_target_entry ); + if( !NT_SUCCESS( status ) ) + { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + HCA_TRACE( HCA_DBG_ERROR, + ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + status = __hca_register( p_dev_obj ); + if( !NT_SUCCESS( status ) ) + { + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + HCA_TRACE( HCA_DBG_ERROR, + ("__get_ci_interface returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} +// +// +//static NTSTATUS +//hca_enable( +// IN DEVICE_OBJECT* const p_dev_obj ) +//{ +// PIO_WORKITEM p_item; +// hca_dev_ext_t *p_ext; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// /* Check for the AL device reference. 
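+//	(Editor's note: the live hca_start below sets p_ext->state to
+//	HCA_STARTED *before* registering for notifications because, as its
+//	own comment says, the PnP callback can fire before the registration
+//	call returns.)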
*/ +// if( p_ext->p_al_dev ) +// { +// __hca_register( p_dev_obj ); +// } +// p_ext->state = HCA_STARTED; +// +// HCA_EXIT( HCA_DBG_PNP ); +// return STATUS_SUCCESS; +//} + + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + net64_t ca_guid = 0; + HCA_ENTER( HCA_DBG_PNP ); + + /* Handled on the way up. */ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Lower drivers failed IRP_MN_START_DEVICE.\n") ); + return status; + } + + p_ext = p_dev_obj->DeviceExtension; + +#ifdef WIN_TO_BE_CHANGED + /* Get the HH HCA handle for this instance. */ + status = __get_hca_handle( p_ext ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Failed to get HH HCA handle.\n") ); + return status; + } +#else + // (1) do all low driver work upon START DEVICE, but card initialization + + // (2) card initialization + +#endif + + + /* store HCA PDO */ + p_ext->hca.p_dev_obj = p_ext->cl_ext.p_pdo; + status = fw_flash_get_ca_guid(p_ext->cl_ext.p_pdo, &ca_guid); + if ( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("fw_flash_get_ca_guid failed status =%#x.\n", status) ); + return status; + } + p_ext->hca.guid = ca_guid; + mlnx_hca_insert( &p_ext->hca ); + + /* + * Change the state since the PnP callback can happen + * before the callback returns. + */ + p_ext->state = HCA_STARTED; + /* Register for interface arrival of the IB_AL device. */ + status = IoRegisterPlugPlayNotification( + EventCategoryDeviceInterfaceChange, + PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES, + (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject, + __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry ); + if( !NT_SUCCESS( status ) ) + { + p_ext->state = HCA_ADDED; + HCA_TRACE( HCA_DBG_ERROR, + ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->state == HCA_REGISTERED ) + { + CL_ASSERT( p_ext->ci_ifc.deregister_ca ); + CL_ASSERT( p_ext->p_al_dev ); + CL_ASSERT( p_ext->p_al_file_obj ); + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + } + + if( p_ext->pnp_target_entry ) + { + ASSERT( p_ext->pnp_ifc_entry ); + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + } + + if( p_ext->pnp_ifc_entry ) + IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry ); + + if( p_ext->p_al_file_obj ) + ObDereferenceObject( p_ext->p_al_file_obj ); + + //cl_event_destroy( &p_ext->mutex ); + + HCA_EXIT( HCA_DBG_PNP ); +} +// +// +//static NTSTATUS +//hca_disable( +// IN DEVICE_OBJECT* const p_dev_obj ) +//{ +// hca_dev_ext_t *p_ext; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// ASSERT( p_ext->state == HCA_STARTED ); +// +// if( p_ext->state = HCA_REGISTERED ) +// { +// /* Notify AL that the CA is being removed. */ +// p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); +// /* Release AL's CI interface. 
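+//	(Editor's note: the guard above, "if( p_ext->state = HCA_REGISTERED )",
+//	is an assignment rather than a comparison and would always be taken;
+//	if this block is ever revived it should use "==", as
+//	hca_release_resources and hca_set_power do.)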
*/ +// p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); +// +// p_ext->state = HCA_STARTED; +// } +// +// HCA_EXIT( HCA_DBG_PNP ); +// return STATUS_SUCCESS; +//} +// +// +//static NTSTATUS +//hca_deactivate( +// IN DEVICE_OBJECT* const p_dev_obj, +// IN IRP* const p_irp, +// OUT cl_irp_action_t* const p_action ) +//{ +// NTSTATUS status; +// hca_dev_ext_t *p_ext; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// UNUSED_PARAM( p_irp ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// *p_action = IrpSkip; +// +// status = hca_disable( p_dev_obj ); +// +// mlnx_hca_remove( &p_ext->hca ); +// +// p_ext->hca.hh_hndl = NULL; +// +// p_ext->state = HCA_ADDED; +// +// HCA_EXIT( HCA_DBG_PNP ); +// return status; +//} + + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); + if( p_ext->state == HCA_REGISTERED ) + { + status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp ); + if( !NT_SUCCESS( status ) ) + { + //cl_event_signal( &p_ext->mutex ); + *p_action = IrpComplete; + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("AL get_relations returned %08x.\n", status) ); + return status; + } + } + else + { + status = cl_alloc_relations( p_irp, 1 ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("cl_alloc_relations returned %08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + p_rel->Count = 0; + p_rel->Objects[0] = NULL; + } + + //cl_event_signal( &p_ext->mutex ); + + *p_action = IrpPassDown; + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + *p_action = IrpSkip; + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + if( p_io_stack->Parameters.Power.Type != DevicePowerState ) + return STATUS_SUCCESS; + + switch( p_io_stack->Parameters.Power.State.DeviceState ) + { + case PowerDeviceD0: + if( p_ext->p_al_dev ) + status = __hca_register( p_dev_obj ); + else + status = STATUS_SUCCESS; + break; + + default: + if( p_ext->state == HCA_REGISTERED ) + { + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + /* Release AL's CI interface. 
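+	   AL must stop using the CA before the device leaves D0. */
+
+	/*
+	 * Editor's note, informational: the policy of this switch is
+	 *   PowerDeviceD0   - re-register with AL if its device is known;
+	 *   any lower state - deregister the CA and fall back to
+	 *                     HCA_STARTED,
+	 * so AL never holds the CA across a low-power transition.
+	 */
+
+	/* Drop AL's reference on the CI interface.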
*/ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + + p_ext->state = HCA_STARTED; + } + status = STATUS_SUCCESS; + break; + } + + if( !NT_SUCCESS( status ) ) + *p_action = IrpComplete; + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +#endif + +typedef struct Primary_Sector{ + uint32_t fi_addr; + uint32_t fi_size; + uint32_t signature; + uint32_t fw_reserved[5]; + uint32_t vsd[56]; + uint32_t branch_to; + uint32_t crc016; +} primary_sector_t; + +static uint32_t old_dir; +static uint32_t old_pol; +static uint32_t old_mod; +static uint32_t old_dat; + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + + ULONG bytes; + NTSTATUS status = STATUS_SUCCESS; + + PAGED_CODE(); + + if (p_BusInterface) + { + + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + (PVOID)&offset, + PCI_CONF_ADDR, + sizeof(ULONG) ); + + if( op_flag == 0 ) + { + if ( bytes ) + bytes = p_BusInterface->GetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length ); + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + + else + { + if ( bytes ) + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length); + + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + } + return status; +} + +static NTSTATUS +fw_get_pci_bus_interface( + IN DEVICE_OBJECT *p_dev_obj, + OUT BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + KEVENT event; + NTSTATUS status; + PIRP p_irp; + IO_STATUS_BLOCK ioStatus; + PIO_STACK_LOCATION p_irpStack; + PDEVICE_OBJECT p_target_obj; + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + p_target_obj = IoGetAttachedDeviceReference( p_dev_obj ); + + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, + p_target_obj, + NULL, + 0, + NULL, + &event, + &ioStatus ); + if (p_irp == NULL) { + status = STATUS_INSUFFICIENT_RESOURCES; + goto End; + } + p_irpStack = IoGetNextIrpStackLocation( p_irp ); + p_irpStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + p_irpStack->Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD; + p_irpStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + p_irpStack->Parameters.QueryInterface.Version = 1; + p_irpStack->Parameters.QueryInterface.Interface = (PINTERFACE) p_BusInterface; + p_irpStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + status = IoCallDriver( p_target_obj, p_irp ); + + if ( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL ); + status = ioStatus.Status; + } +End: + // Done with reference + ObDereferenceObject( p_target_obj ); + return status; +} + +ib_api_status_t +fw_access_ctrl( + IN const void* __ptr64 p_context, + IN const void* __ptr64* const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + DEVICE_OBJECT *p_dev_obj; + static BUS_INTERFACE_STANDARD BusInterface; + static uint32_t if_ready; + NTSTATUS status; + PVOID p_data; + ULONG offset; + ULONG POINTER_ALIGNMENT length; + ib_ci_op_t *p_ci; + mlnx_hob_t *p_hob; + + UNREFERENCED_PARAMETER(handle_array); + UNREFERENCED_PARAMETER(num_handles); + UNREFERENCED_PARAMETER(p_umv_buf); + + status = STATUS_SUCCESS; + p_hob = (mlnx_hob_t *)(const void *)p_context; + + p_dev_obj = (DEVICE_OBJECT 
*)(const void *)p_hob->p_dev_obj; + p_ci = p_ci_op; + + if ( !p_ci ) + return STATUS_INVALID_DEVICE_REQUEST; + if ( !p_ci->buf_size ) + return STATUS_INVALID_DEVICE_REQUEST; + + length = p_ci->buf_size; + offset = p_ci->buf_info; + p_data = p_ci->p_buf; + + switch ( p_ci->command ) + { + case FW_READ: // read data from flash + if ( if_ready ) + { + status = fw_flash_read_data(&BusInterface, p_data, offset, length); + } + break; + case FW_WRITE: // write data to flash + if ( if_ready ) + { + + status = fw_flash_write_data(&BusInterface, p_data, offset, length); + } + break; + case FW_READ_CMD: + if ( if_ready ) + { + status = fw_access_pciconf(&BusInterface, 0 , p_data, offset, 4); + } + break; + case FW_WRITE_CMD: + if ( if_ready ) + { + status = fw_access_pciconf(&BusInterface, 1 , p_data, offset, 4); + } + break; + case FW_CLOSE_IF: // close BusInterface + if (if_ready ) + { + if_ready = 0; + BusInterface.InterfaceDereference((PVOID)BusInterface.Context); + } + return status; + case FW_OPEN_IF: // open BusInterface + if ( !if_ready ) + { + status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface); + + if ( NT_SUCCESS( status ) ) + { + if_ready = 1; + status = STATUS_SUCCESS; + } + } + return status; + default: + status = STATUS_NOT_SUPPORTED; + } + + if ( status != STATUS_SUCCESS ) + { + if ( if_ready ) + { + if_ready = 0; + BusInterface.InterfaceDereference((PVOID)BusInterface.Context); + } + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("fw_access_ctrl failed returns %08x.\n", status) ); + } + return status; +} + +static NTSTATUS +fw_flash_write_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status; + uint32_t cnt = 0; + uint32_t lcl_data; + + lcl_data = (*((uint32_t*)p_buffer) << 24); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET+4, length ); + if ( status != STATUS_SUCCESS ) + return status; + lcl_data = ( WRITE_BIT | (offset & ADDR_MSK)); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + lcl_data = 0; + + do + { + if (++cnt > 5000) + { + return STATUS_DEVICE_NOT_READY; + } + + status = fw_access_pciconf(p_BusInterface, FW_READ , &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + } while(lcl_data & CMD_MASK); + + return status; +} + +static NTSTATUS +fw_flash_read_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t cnt = 0; + uint32_t lcl_data = ( READ_BIT | (offset & ADDR_MSK)); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE, &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + lcl_data = 0; + do + { + // Timeout checks + if (++cnt > 5000 ) + { + return STATUS_DEVICE_NOT_READY; + } + + status = fw_access_pciconf(p_BusInterface, FW_READ, &lcl_data, FLASH_OFFSET, 4 ); + + if ( status != STATUS_SUCCESS ) + return status; + + } while(lcl_data & CMD_MASK); + + status = fw_access_pciconf(p_BusInterface, FW_READ, p_buffer, FLASH_OFFSET+4, length ); + return status; +} + +static NTSTATUS +fw_flash_get_ca_guid( + IN DEVICE_OBJECT *p_dev_obj, + OUT net64_t *ca_guid ) +{ + NTSTATUS status = STATUS_SUCCESS; + BUS_INTERFACE_STANDARD BusInterface; + + uint32_t NODE_GUIDH, NODE_GUIDL; + uint32_t prim_ptr = 0; + uint32_t signature; + + primary_sector_t ps; + cl_memset( &ps, 0, 
sizeof(primary_sector_t)); + + status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface); + + if ( !NT_SUCCESS( status ) ) + return status; + + status = fw_flash_init (&BusInterface); + if (status != STATUS_SUCCESS ) + return status; + status = fw_flash_read_data(&BusInterface, &signature, 0x24, 4); + if (status != STATUS_SUCCESS ) + return status; + //signature = cl_ntoh32(signature); + + if (signature == FW_SIGNATURE) + { + //Fail Safe image + + // Assume flash has been verified, and both images have the same guids, therefore, + // we only need to read the primary image's guids + status = fw_flash_readbuf(&BusInterface, FW_SECT_SIZE, &ps, sizeof(ps)); + if ( status == STATUS_SUCCESS ) + { + status = fw_flash_read_data(&BusInterface, &prim_ptr, ps.fi_addr+0x24, 4); + if (status == STATUS_SUCCESS ) + prim_ptr = prim_ptr + ps.fi_addr; + } + } + else + { + // Short image + prim_ptr = signature; + } + + if ( signature == FW_SIGNATURE || prim_ptr < MAX_FLASH_SIZE ) + { + /* now we can read ca guid + * since we read it in host mode fw_flash_read4() + * swaps it back in BE - how it was stored in FW + */ + if (( status = fw_flash_read4(&BusInterface, prim_ptr, &NODE_GUIDL)) == STATUS_SUCCESS ) + if (( status = fw_flash_read4(&BusInterface, prim_ptr+4, &NODE_GUIDH)) == STATUS_SUCCESS ) + { + *ca_guid = NODE_GUIDH; + *ca_guid = (*ca_guid << 32) | NODE_GUIDL; + } + } + else + { + //invalid GUID pointer + return STATUS_NO_SUCH_DEVICE; + } + fw_flash_deinit(&BusInterface); + BusInterface.InterfaceDereference((PVOID)BusInterface.Context); + return status; +} + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t lcl_data = 0; + uint32_t bank; + static uint32_t curr_bank = 0xffffffff; + + if (addr & 0x3) + return STATUS_INVALID_PARAMETER; + + bank = addr & BANK_MASK; + if (bank != curr_bank) + { + curr_bank = bank; + if ((status = fw_set_bank(p_BusInterface, bank)) != STATUS_SUCCESS ) + return STATUS_INVALID_PARAMETER; + } + status = fw_flash_read_data(p_BusInterface, &lcl_data, addr, 4); + *p_data = cl_ntoh32(lcl_data); + return STATUS_SUCCESS; +} + +static NTSTATUS +fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t *p_lcl_data; + uint32_t i; + + if (offset & 0x3) + { + //Address should be 4-bytes aligned + return STATUS_INVALID_PARAMETER; + } + if (len & 0x3) + { + //Length should be 4-bytes aligned + return STATUS_INVALID_PARAMETER; + } + p_lcl_data = (uint32_t *)p_data; + + for ( i=0; i < (len >> 2); i++) + { + if ( (status = fw_flash_read_data( p_BusInterface, p_lcl_data, offset, sizeof(uint32_t) )) != STATUS_SUCCESS ) + return status; + offset += 4; + p_lcl_data++; + } + return STATUS_SUCCESS; +} // Flash::flash_read + +static NTSTATUS +fw_flash_writebuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t i; + uint8_t *p_data = (uint8_t *)p_buffer; + + for ( i = 0; i < length; i++ ) + { + status = fw_flash_write_data (p_BusInterface, p_data, offset, 1 ); + if (status != STATUS_SUCCESS ) + return status; + p_data++; + offset++; + } + return status; +} +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t dir; + uint32_t pol; + uint32_t mod; + + uint32_t cnt=0; + uint32_t data; + NTSTATUS 
status = STATUS_SUCCESS; + uint32_t semaphore = 0; + + while ( !semaphore ) + { + status = fw_access_pciconf(p_BusInterface, FW_READ , &data, SEMAP63, 4); + if ( status != STATUS_SUCCESS ) + break; + if( !data ) + { + semaphore = 1; + break; + } + if (++cnt > 5000 ) + { + break; + } + } + + if ( !semaphore ) + { + return STATUS_NOT_SUPPORTED; + } + + // Save old values + + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dat,GPIO_DAT_L , 4); + + // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits + dir = old_dir | 0x70; + pol = old_pol & ~0x70; + mod = old_mod & ~0x70; + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + // Set CPUMODE + status = fw_access_pciconf(p_BusInterface, FW_READ , &data, CPUMODE, 4); + if ( status == STATUS_SUCCESS ) + { + data &= ~CPUMODE_MSK; + data |= 1 << CPUMODE_SHIFT; + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, CPUMODE, 4); + } + if ( status == STATUS_SUCCESS ) + { + // Reset flash + data = 0xf0; + status = fw_flash_write_data(p_BusInterface, &data, 0x0, 4); + } + return status; +} + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t data = 0; + NTSTATUS status = STATUS_SUCCESS; + + status = fw_set_bank(p_BusInterface, 0); + if ( status == STATUS_SUCCESS ) + // Restore origin values + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dat,GPIO_DAT_L , 4); + if ( status == STATUS_SUCCESS ) + // Free GPIO Semaphore + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, SEMAP63, 4); + return status; +} + +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t data = ( (uint32_t)0x70 << 24 ); + uint32_t mask = ((bank >> (BANK_SHIFT-4)) << 24 ); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATACLEAR_L, 4); + if (status == STATUS_SUCCESS) + { + // A1 + data &= mask; + //data |= mask; // for A0 + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATASET_L, 4); + } + return status; +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_driver.h b/branches/MTHCA/hw/mthca/kernel/hca_driver.h new file mode 100644 index 00000000..b9c5e136 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_driver.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_driver.h 46 2005-05-30 17:55:53Z sleybo $ + */ + + +#if !defined( _HCA_DRIVER_H_ ) +#define _HCA_DRIVER_H_ + + +#include +#include +#include +#include +#include "mt_l2w.h" +#include "hca_debug.h" +#include "hca_data.h" +#include "hca_pnp.h" +#include "hca_pci.h" + + +#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK +#define FILE_DEVICE_INFINIBAND 0x0000003B +#endif + +/****s* HCA/hca_reg_state_t +* NAME +* hca_reg_state_t +* +* DESCRIPTION +* State for tracking registration with AL. This state is independent of the +* device PnP state, and both are used to properly register with AL. +* +* SYNOPSIS +*/ +typedef enum _hca_reg_state +{ + HCA_SHUTDOWN, + HCA_ADDED, + HCA_STARTED, + HCA_REGISTERED + +} hca_reg_state_t; +/* +* VALUES +* HCA_SHUTDOWN +* Cleaning up. +* +* HCA_ADDED +* AddDevice was called and successfully registered for interface +* notifications. +* +* HCA_STARTED +* IRP_MN_START_DEVICE was called. The HCA is fully functional. +* +* HCA_REGISTERED +* Fully functional and registered with the bus root. +*********/ + + +typedef enum _hca_bar_type +{ + HCA_BAR_TYPE_HCR, + HCA_BAR_TYPE_UAR, + HCA_BAR_TYPE_DDR, + HCA_BAR_TYPE_MAX + +} hca_bar_type_t; + + +typedef struct _hca_bar +{ + uint64_t phys; + void *virt; + SIZE_T size; + +} hca_bar_t; + + + +typedef struct _hca_dev_ext +{ + /* ------------------------------------------------- + * PNP DATA + * ------------------------------------------------ */ + cl_pnp_po_ext_t cl_ext; /* COMPLIB PnP object */ + void * pnp_ifc_entry; /* Notification entry for PnP interface events. */ + void * pnp_target_entry; /* Notification entry for PnP target events. */ + PNP_DEVICE_STATE pnpState; /* state for PnP Manager */ + + /* ------------------------------------------------- + * POWER MANAGER DATA + * ------------------------------------------------ */ + /* Cache of the system to device power states. */ + DEVICE_POWER_STATE DevicePower[PowerSystemMaximum]; + DEVICE_POWER_STATE PowerState; /* state for Power Manager */ + PIO_WORKITEM pPoWorkItem; + + /* ------------------------------------------------- + * IB_AL DATA + * ------------------------------------------------ */ + ib_ci_ifc_t ci_ifc; /* Interface for the lower edge of the IB_AL device. 
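+	   Acquired through IRP_MN_QUERY_INTERFACE in __get_ci_interface. */
+	/*
+	 * Editor's note, informational: ci_ifc is dereferenced whenever the
+	 * device leaves the HCA_REGISTERED state; register_ca, deregister_ca
+	 * and get_relations are all reached through it.
+	 */
+	/* The state below gates every use of ci_ifc.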
*/ + hca_reg_state_t state; /* State for tracking registration with AL */ + DEVICE_OBJECT * p_al_dev; /* IB_AL FDO */ + FILE_OBJECT * p_al_file_obj; /* IB_AL file object */ + + /* ------------------------------------------------- + * LOW LEVEL DRIVER' DATA + * ------------------------------------------------ */ + mlnx_hca_t hca; + + /* ------------------------------------------------- + * OS DATA + * ------------------------------------------------ */ + hca_bar_t bar[HCA_BAR_TYPE_MAX]; /* HCA memory bars */ + CM_PARTIAL_RESOURCE_DESCRIPTOR interruptInfo; /* HCA interrupt resources */ + PKINTERRUPT int_obj; /* HCA interrupt object */ + spinlock_t isr_lock; /* lock for the ISR */ + + /* ------------------------------------------------- + * VARIABLES + * ------------------------------------------------ */ + DMA_ADAPTER * p_dma_adapter; /* HCA adapter object */ + ULONG n_map_regs; /* num of allocated adapter map registers */ + PCI_COMMON_CONFIG hcaConfig; /* saved HCA PCI configuration header */ + int hca_hidden; /* flag: when set - no attached DDR memory */ + +} hca_dev_ext_t; + +#define EXT_FROM_HOB(hob_p) (container_of(hob_p, hca_dev_ext_t, hca.hob)) +#define IBDEV_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev) +#define HOBUL_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.hobul) + + + +/*********************************** +Firmware Update definitions +***********************************/ +#define PCI_CONF_ADDR (0x00000058) +#define PCI_CONF_DATA (0x0000005c) +#define FLASH_OFFSET (0x000f01a4) +#define READ_BIT (1<<29) +#define WRITE_BIT (2<<29) +#define ADDR_MSK (0x0007ffff) +#define CMD_MASK (0xe0000000) +#define BANK_SHIFT (19) +#define BANK_MASK (0xfff80000) +#define MAX_FLASH_SIZE (0x80000) // 512K + +#define SEMAP63 (0xf03fc) +#define GPIO_DIR_L (0xf008c) +#define GPIO_POL_L (0xf0094) +#define GPIO_MOD_L (0xf009c) +#define GPIO_DAT_L (0xf0084) +#define GPIO_DATACLEAR_L (0xf00d4) +#define GPIO_DATASET_L (0xf00dc) + +#define CPUMODE (0xf0150) +#define CPUMODE_MSK (0xc0000000UL) +#define CPUMODE_SHIFT (30) + +/* Definitions intended to become shared with UM. Later... */ +#define FW_READ 0x00 +#define FW_WRITE 0x01 +#define FW_READ_CMD 0x08 +#define FW_WRITE_CMD 0x09 +#define FW_OPEN_IF 0xe7 +#define FW_CLOSE_IF 0x7e + +#define FW_SIGNATURE (0x5a445a44) +#define FW_SECT_SIZE (0x10000) + +static inline errno_to_iberr(int err) +{ +#define ERR_NAME(a) #a +#define MAP_ERR(err,ibstatus) case err: ib_status = ibstatus; break + ib_api_status_t ib_status = IB_UNKNOWN_ERROR; + switch (err) { + MAP_ERR( ENOENT, IB_NOT_FOUND ); + MAP_ERR( EINTR, IB_INTERRUPTED ); + MAP_ERR( EAGAIN, IB_RESOURCE_BUSY ); + MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY ); + MAP_ERR( EACCES, IB_INVALID_PERMISSION ); + MAP_ERR( EFAULT, IB_ERROR ); + MAP_ERR( EBUSY, IB_RESOURCE_BUSY ); + MAP_ERR( ENODEV, IB_UNSUPPORTED ); + MAP_ERR( EINVAL, IB_INVALID_PARAMETER ); + MAP_ERR( ENOSYS, IB_UNSUPPORTED ); + default: + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Unmapped errno %s (%d)\n", ERR_NAME(err), err)); + break; + } + return ib_status; +} + +#endif /* !defined( _HCA_DRIVER_H_ ) */ diff --git a/branches/MTHCA/hw/mthca/kernel/hca_mcast.c b/branches/MTHCA/hw/mthca/kernel/hca_mcast.c new file mode 100644 index 00000000..1b39abe8 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_mcast.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_mcast.c 46 2005-05-30 17:55:53Z sleybo $ + */ + + +#include +#include + +#include "hca_data.h" + +/* +* Multicast Support Verbs. +*/ +ib_api_status_t +mlnx_attach_mcast ( + IN const ib_qp_handle_t h_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_qp); + UNREFERENCED_PARAMETER(p_mcast_gid); + UNREFERENCED_PARAMETER(mcast_lid); + UNREFERENCED_PARAMETER(ph_mcast); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_attach_mcast not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status; + + u_int32_t hca_idx = QP_HCA_FROM_HNDL(h_qp); + u_int32_t qp_num = QP_NUM_FROM_HNDL(h_qp); + u_int32_t qp_idx = 0; + mlnx_mcast_t *mcast_p = NULL; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + UNUSED_PARAM( mcast_lid ); + + if (!p_mcast_gid || !ph_mcast) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mcast_p = cl_zalloc( sizeof(mlnx_mcast_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark ) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + memcpy(&mcast_p->mcast_gid, &p_mcast_gid->raw[0], sizeof(IB_gid_t)); + mcast_p->hca_idx = hca_idx; + mcast_p->qp_num = qp_num; + mcast_p->mark = E_MARK_MG; + + cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex); + + if (HH_OK != THH_hob_attach_to_multicast( hobul_p->hh_hndl, qp_num, mcast_p->mcast_gid)) { + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); + + *ph_mcast = (ib_mcast_handle_t)mcast_p; + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); +cleanup: + if (mcast_p) cl_free( mcast_p); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + 
p_umv_buf->status = status; + } + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_detach_mcast ( + IN const ib_mcast_handle_t h_mcast) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mcast); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_detach_mcast not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status; + mlnx_mcast_t *mcast_p = (mlnx_mcast_t *)h_mcast; + + u_int32_t hca_idx; + u_int32_t qp_num; + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (!mcast_p || mcast_p->mark != E_MARK_MG) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + hca_idx = mcast_p->hca_idx; + qp_num = mcast_p->qp_num; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark ) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex); + + if (HH_OK != THH_hob_detach_from_multicast( hobul_p->hh_hndl, qp_num, mcast_p->mcast_gid)) { + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); + + mcast_p->mark = E_MARK_INVALID; + cl_free( mcast_p); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); +cleanup: + if (mcast_p) { + mcast_p->mark = E_MARK_INVALID; + cl_free( mcast_p); + } + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->attach_mcast = mlnx_attach_mcast; + p_interface->detach_mcast = mlnx_detach_mcast; +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_memory.c b/branches/MTHCA/hw/mthca/kernel/hca_memory.c new file mode 100644 index 00000000..ded3062a --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_memory.c @@ -0,0 +1,1179 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_memory.c 46 2005-05-30 17:55:53Z sleybo $
+ */
+
+
+#include "hca_driver.h"
+
+static inline u32 convert_access(ib_access_t acc)
+{
+	return (acc & IB_AC_ATOMIC ? IB_ACCESS_REMOTE_ATOMIC : 0) |
+		(acc & IB_AC_RDMA_WRITE ? IB_ACCESS_REMOTE_WRITE : 0) |
+		(acc & IB_AC_RDMA_READ ? IB_ACCESS_REMOTE_READ : 0) |
+		(acc & IB_AC_LOCAL_WRITE ? IB_ACCESS_LOCAL_WRITE : 0);
+}
+
+
+/*
+ * Memory Management Verbs.
+ */
+
+ib_api_status_t
+mlnx_register_mr (
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	ib_mr_create_t				*p_mr_create,
+	OUT				net32_t* const				p_lkey,
+	OUT				net32_t* const				p_rkey,
+	OUT				ib_mr_handle_t				*ph_mr,
+	IN				boolean_t					um_call )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+	ib_api_status_t		status;
+	int					err;
+	u_int32_t			lkey, rkey;
+	struct ib_mr		*mr_p;
+	struct ib_umem		region;
+	mt_iobuf_t			iobuf;
+	struct ib_udata		udata;
+	struct mthca_mr		*mro_p;
+	struct ib_pd		*ib_pd_p = (struct ib_pd *)h_pd;
+
+
+	UNUSED_PARAM( um_call );
+
+	HCA_ENTER(MLNX_DBG_TRACE);
+
+	HCA_TRACE(CL_DBG_ERROR, ("mlnx_register_mr not implemented\n"));
+	return IB_UNSUPPORTED;
+
+	// sanity checks
+	if( !cl_is_blockable() ) {
+		status = IB_UNSUPPORTED;
+		goto err_unsupported;
+	}
+	if (!p_mr_create || 0 == p_mr_create->length) {
+		status = IB_INVALID_PARAMETER;
+		goto err_invalid_parm;
+	}
+
+	// lock buffer for user
+	if (um_call) {
+		err = iobuf_register(
+			(UINT_PTR)p_mr_create->vaddr,
+			p_mr_create->length,
+			um_call,
+			(int)p_mr_create->access_ctrl,
+			&iobuf );
+		if (err) {
+			HCA_TRACE (CL_DBG_ERROR, ("iobuf_register failed(%d) \n",err));
+			status = errno_to_iberr(err);
+			goto err_lock;
+		}
+	}
+
+	// prepare parameters
+	RtlZeroMemory(&region, sizeof(region));
+	RtlZeroMemory(&udata, sizeof(udata));
+	region.user_base = (u64)p_mr_create->vaddr;
+	region.virt_base = (u64)p_mr_create->vaddr;
+	region.page_size = PAGE_SIZE;
+	region.length = p_mr_create->length;
+	//TODO: end filling region (add list of chunks)
+	//TODO: fill udata
+
+	// register mr
+	mr_p = mthca_reg_user_mr(ib_pd_p, &region,
+		convert_access(p_mr_create->access_ctrl), &udata);
+	if (IS_ERR(mr_p)) {
+		err = PTR_ERR(mr_p);
+		CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
+			("mthca_reg_user_mr failed (%d)\n", err));
+		status = errno_to_iberr(err);
+		goto err_reg_user_mr;
+	}
+
+	// results
+	mro_p = (struct mthca_mr *)mr_p;
+	mro_p->iobuf = iobuf;
+	if (ph_mr)	*ph_mr = (ib_mr_handle_t)mr_p;
+	*p_lkey = mr_p->lkey;
+	*p_rkey = mr_p->rkey;
+	status = IB_SUCCESS;
+
+err_reg_user_mr:
+	if (um_call)
+		iobuf_deregister(&iobuf );
+err_lock:
+err_invalid_parm:
+err_unsupported:
+	HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	HCA_EXIT(MLNX_DBG_TRACE);
+	return status;
+
+#else
+	u_int32_t			hca_idx = PD_HCA_FROM_HNDL(h_pd);
+	u_int32_t			pd_idx = PD_NUM_FROM_HNDL(h_pd);
+	mlnx_hobul_t		*hobul_p;
+	ib_api_status_t		status;
+
+	HH_mr_t				mr_props;
+	mlnx_mro_t			*mro_p = NULL;
+	u_int32_t			lkey=0, rkey=0;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	if( !cl_is_blockable() )
+		return IB_UNSUPPORTED;
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if
(E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_mr_create || 0 == p_mr_create->length) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mro_p = cl_zalloc( sizeof(mlnx_mro_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + // Convert MR properties (LOCKS THE REGION as a side effect) + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_mr_create(pd_idx, mro_p, + VAPI_MR_CHANGE_TRANS | VAPI_MR_CHANGE_PD | VAPI_MR_CHANGE_ACL, + p_mr_create, um_call, &mr_props); + if (status != IB_SUCCESS ) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_mr(hobul_p->hh_hndl, &mr_props, &lkey, &rkey)) { + status = IB_ERROR; + goto cleanup_post_lock; + } + + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = cl_hton32( rkey ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mro_p->mark = E_MARK_MR; + mro_p->mr_type = E_MR_ANY; + mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + mro_p->mr_lkey = lkey; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mro_p; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_post_lock: + MOSAL_iobuf_deregister(mro_p->mr_iobuf); + +cleanup: + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? mro_p->mark : 0xBAD))); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_register_pmr ( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ +#ifndef WIN_TO_BE_CHANGED + ib_api_status_t status; + int err; + u_int32_t lkey, rkey; + struct ib_mr *mr_p; + struct ib_phys_buf *buffer_list; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + + UNUSED_PARAM( um_call ); + + HCA_ENTER(MLNX_DBG_TRACE); + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + if (!p_vaddr || !p_pmr_create || + 0 == p_pmr_create->length ) { + status = IB_INVALID_PARAMETER; + goto err_invalid_parm; + } + + // prepare parameters + buffer_list = (void*)p_pmr_create->range_array; + //NB: p_pmr_create->buf_offset is not used, i.e. supposed that region is page-aligned + //NB: p_pmr_create->hca_page_size is not used, i.e. 
supposed it is always the same + + // register pmr + mr_p = mthca_reg_phys_mr(ib_pd_p, + buffer_list, p_pmr_create->num_ranges, + convert_access(p_pmr_create->access_ctrl), + p_vaddr ); + if (IS_ERR(mr_p)) { + err = PTR_ERR(mr_p); + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("mthca_reg_phys_mr failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_reg_phys_mr; + } + + // results + if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p; + *p_lkey = mr_p->lkey; + *p_rkey = mr_p->rkey; + //NB: p_vaddr was not changed + status = IB_SUCCESS; + +err_reg_phys_mr: +err_invalid_parm: +err_unsupported: + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + HH_mr_t mr_props = { 0 }; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + UNUSED_PARAM( um_call ); + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_vaddr || !p_pmr_create || + 0 == p_pmr_create->length ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + mro_p = cl_zalloc( sizeof(mlnx_mro_t)); + if ( !mro_p ) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + // Convert PMR properties + mro_p->mr_start = *p_vaddr; + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_pmr_create( pd_idx, mro_p, p_pmr_create, + &mr_props ); + if (status != IB_SUCCESS ) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_mr( hobul_p->hh_hndl, &mr_props, + &lkey, &rkey )) { + status = IB_ERROR; + goto cleanup; + } + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = cl_hton32( rkey ); + + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mro_p->mark = E_MARK_MR; + mro_p->mr_type = E_MR_PHYS; + mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + mro_p->mr_lkey = lkey; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mro_p; + *p_vaddr = mro_p->mr_start; // return the updated address + + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_query_mr ( + IN const ib_mr_handle_t h_mr, + OUT ib_mr_attr_t *p_mr_query ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(p_mr_query); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_query_mr not implemented\n")); + return IB_UNSUPPORTED; +#else + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status = IB_SUCCESS; + + HH_mr_info_t mr_info; + mlnx_mro_t *mro_p = NULL; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + + if (HH_OK != THH_hob_query_mr(hobul_p->hh_hndl, mro_p->mr_lkey, &mr_info)) { + status = IB_ERROR; + goto cleanup; + } + + mlnx_conv_vapi_mr_attr((ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx), &mr_info, p_mr_query); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +ib_api_status_t +mlnx_modify_mr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_mr_create_t *p_mr_create, + OUT uint32_t *p_lkey, + OUT uint32_t *p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(mem_modify_req); + UNREFERENCED_PARAMETER(p_mr_create); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(um_call); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_modify_mr not implemented\n")); + return IB_UNSUPPORTED; +#else + u_int32_t hca_idx; + u_int32_t pd_idx, old_pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + VAPI_mr_change_t change_mask; + HH_mr_t mr_props; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + if ( !p_mr_create || 0 == p_mr_create->length || + !p_lkey || !p_rkey) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if( (mem_modify_req & IB_MR_MOD_PD) && !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + if( (mem_modify_req & IB_MR_MOD_PD) && h_pd ) + pd_idx = PD_NUM_FROM_HNDL(h_pd); + else + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + old_pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + + // change_mask = mem_modify_req; + change_mask = 0; + if (mem_modify_req & IB_MR_MOD_ADDR) change_mask |= VAPI_MR_CHANGE_TRANS; + if (mem_modify_req & IB_MR_MOD_PD) change_mask |= VAPI_MR_CHANGE_PD; + if (mem_modify_req & IB_MR_MOD_ACCESS) change_mask |= VAPI_MR_CHANGE_ACL; + + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_mr_create(pd_idx, mro_p, change_mask, p_mr_create, + um_call, &mr_props); + if ( status != IB_SUCCESS ) { + goto cleanup; + } + + if (HH_OK != THH_hob_reregister_mr(hobul_p->hh_hndl, + mro_p->mr_lkey, + change_mask, + &mr_props, + &lkey, &rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + if( (mem_modify_req & IB_MR_MOD_PD) && h_pd ) + { + mro_p->mr_pd_handle = PD_HNDL_FROM_PD( pd_idx ); + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + cl_atomic_dec(&hobul_p->pd_info_tbl[old_pd_idx].count); + } + + // Update our "shadow" (TBD: old memory region may need to be unlocked) + mro_p->mr_lkey = lkey; + + // Return new keys to the caller + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = rkey; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +ib_api_status_t +mlnx_modify_pmr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(mem_modify_req); + UNREFERENCED_PARAMETER(p_pmr_create); + UNREFERENCED_PARAMETER(p_vaddr); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(um_call); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_modify_pmr not implemented\n")); + return IB_UNSUPPORTED; +#else + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + VAPI_mr_change_t change_mask; + HH_mr_t mr_props = { 0 }; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + UNUSED_PARAM( um_call ); + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + if ( !p_pmr_create || 0 == p_pmr_create->length || + !p_lkey || !p_rkey) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + if( h_pd ) + pd_idx = PD_NUM_FROM_HNDL( h_pd ); + else + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // change_mask = mem_modify_req; + change_mask = 0; + if (mem_modify_req & IB_MR_MOD_ADDR) change_mask |= VAPI_MR_CHANGE_TRANS; + if (mem_modify_req & IB_MR_MOD_PD) change_mask |= VAPI_MR_CHANGE_PD; + if (mem_modify_req & IB_MR_MOD_ACCESS) change_mask |= VAPI_MR_CHANGE_ACL; + + // Convert PMR properties + 
mro_p->mr_start = *p_vaddr; + cl_memclr(&mr_props, sizeof(HH_mr_t)); + if (IB_SUCCESS != (status = mlnx_conv_ibal_pmr_create(pd_idx, mro_p, p_pmr_create, &mr_props))) { + goto cleanup; + } + + if (HH_OK != THH_hob_reregister_mr(hobul_p->hh_hndl, + mro_p->mr_lkey, + change_mask, + &mr_props, + &lkey, &rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + // Update our "shadow" + mro_p->mr_lkey = lkey; + + // Return new keys to the caller + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = rkey; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_register_smr ( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(access_ctrl); + UNREFERENCED_PARAMETER(p_vaddr); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(ph_mr); + UNREFERENCED_PARAMETER(um_call); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_register_smr not implemented\n")); + return IB_UNSUPPORTED; +#else + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + HH_smr_t smr_props; + mlnx_mro_t *base_mro_p = NULL; + mlnx_mro_t *new_mro_p = NULL; + u_int32_t lkey, rkey; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!ph_mr || !p_vaddr || !p_lkey || !p_rkey ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + base_mro_p = (mlnx_mro_t *)h_mr; + if (!base_mro_p || base_mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + // Convert SMR properties + smr_props.lkey = base_mro_p->mr_lkey; // L-Key of the region to share with + // This region start virtual addr + smr_props.start = *p_vaddr; + // PD handle for new memory region + smr_props.pd = PD_NUM_FROM_HNDL(base_mro_p->mr_pd_handle); + smr_props.acl = map_ibal_acl(access_ctrl); // Access control (R/W permission local/remote + + // Allocate new handle for shared region + if (NULL == (new_mro_p = cl_zalloc( sizeof(mlnx_mro_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + new_mro_p->mr_start = *p_vaddr; + new_mro_p->mr_size = base_mro_p->mr_size; + + // This computation should be externalized by THH + new_mro_p->mr_mosal_perm = + MOSAL_PERM_READ | + 
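+		/* Editorial note, sketching the intent of the expression that
+		 * follows: local read access is implicit for the shared region,
+		 * while local write is granted only when the ACL carries
+		 * VAPI_EN_LOCAL_WRITE. */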
((smr_props.acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0); + + if (IB_SUCCESS != (status = mlnx_lock_region(new_mro_p, um_call ))) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_smr(hobul_p->hh_hndl, &smr_props, &lkey, &rkey)) { + status = IB_ERROR; + goto cleanup; + } + + // Return modified values + *p_vaddr = smr_props.start; + *p_lkey = lkey; + *p_rkey = cl_hton32( rkey ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + new_mro_p->mark = E_MARK_MR; + new_mro_p->mr_type = E_MR_SHARED; + new_mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + new_mro_p->mr_lkey = lkey; + + *ph_mr = (ib_mr_handle_t)new_mro_p; + +// CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("new_mro_p 0x%p page 0x%x, %d\n", +// new_mro_p, new_mro_p->mr_first_page_addr, new_mro_p->mr_num_pages)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (new_mro_p) { + new_mro_p->mark = E_MARK_INVALID; + cl_free( new_mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_deregister_mr ( + IN const ib_mr_handle_t h_mr) +{ +#ifndef WIN_TO_BE_CHANGED + ib_api_status_t status; + int err; + + HCA_ENTER(MLNX_DBG_TRACE); + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + // deregister + err = mthca_dereg_mr((struct ib_mr *)h_mr); + if (err) { + status = errno_to_iberr(err); + HCA_TRACE (CL_DBG_ERROR, + ("mthca_dereg_mr failed (%d) for mr %p\n", err, h_mr)); + goto err_dereg_mr; + } + + // unlock user buffer + { + struct mthca_mr *mro_p = (struct mthca_mr *)h_mr; + if (mro_p->iobuf.is_user) + iobuf_deregister( &mro_p->iobuf ); + } + + status = IB_SUCCESS; + +err_dereg_mr: +err_unsupported: + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + mlnx_mro_t *mro_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + THH_hob_deregister_mr(hobul_p->hh_hndl, mro_p->mr_lkey); + + if (mro_p->mr_type != E_MR_PHYS) { + MOSAL_iobuf_deregister(mro_p->mr_iobuf); + } + + // update PD object count + cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count)); + + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +/* +* Memory Window Verbs. +*/ + +ib_api_status_t +mlnx_create_mw ( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + OUT ib_mw_handle_t *ph_mw, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(ph_mw); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_create_mw not implemented\n")); + return IB_UNSUPPORTED; +#else + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + mlnx_mwo_t *mwo_p = NULL; + ib_api_status_t status; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_rkey || !ph_mw) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mwo_p = cl_zalloc( sizeof(mlnx_mwo_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THH_hob_alloc_mw(hobul_p->hh_hndl, pd_idx, (IB_rkey_t *)&mwo_p->mw_rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mwo_p->mark = E_MARK_MW; + mwo_p->hca_idx = hca_idx; + mwo_p->pd_idx = pd_idx; + *p_rkey = cl_hton32( mwo_p->mw_rkey ); + + *ph_mw = (ib_mw_handle_t)mwo_p; + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t 
+mlnx_query_mw ( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t *ph_pd, + OUT net32_t* const p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mw); + UNREFERENCED_PARAMETER(ph_pd); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_query_mw not implemented\n")); + return IB_UNSUPPORTED; +#else + mlnx_mwo_t *mwo_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + hca_idx = mwo_p->hca_idx; + pd_idx = mwo_p->pd_idx; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MW_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MW_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx); + if (p_rkey) *p_rkey = cl_hton32( mwo_p->mw_rkey ); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_destroy_mw ( + IN const ib_mw_handle_t h_mw) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_mw); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_destroy_mw not implemented\n")); + return IB_UNSUPPORTED; +#else + mlnx_mwo_t *mwo_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + hca_idx = mwo_p->hca_idx; + pd_idx = mwo_p->pd_idx; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MW_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MW_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (HH_OK != THH_hob_free_mw(hobul_p->hh_hndl, (IB_rkey_t)mwo_p->mw_rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count); + + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->register_mr = mlnx_register_mr; + 
p_interface->register_pmr = mlnx_register_pmr; + p_interface->query_mr = mlnx_query_mr; + p_interface->modify_mr = mlnx_modify_mr; + p_interface->modify_pmr = mlnx_modify_pmr; + p_interface->register_smr = mlnx_register_smr; + p_interface->deregister_mr = mlnx_deregister_mr; + + p_interface->create_mw = mlnx_create_mw; + p_interface->query_mw = mlnx_query_mw; + p_interface->destroy_mw = mlnx_destroy_mw; +} + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_pci.c b/branches/MTHCA/hw/mthca/kernel/hca_pci.c new file mode 100644 index 00000000..0e308e4f --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_pci.c @@ -0,0 +1,741 @@ + +#include "hca_driver.h" +#include +#include +#include +#ifdef WIN_TO_BE_CHANGED +#include +#endif + + +#define HCA_RESET_HCR_OFFSET 0x000F0010 +#define HCA_RESET_TOKEN CL_HTON32(0x00000001) + +#define PCI_CAPABILITY_ID_VPD 0x03 +#define PCI_CAPABILITY_ID_PCIX 0x07 +#define PCI_CAPABILITY_ID_PCIEXP 0x10 + + +/* + * Vital Product Data Capability + */ +typedef struct _PCI_VPD_CAPABILITY { + + PCI_CAPABILITIES_HEADER Header; + + USHORT Flags; + ULONG Data; + +} PCI_VPD_CAPABILITY, *PPCI_VPD_CAPABILITY; + + +/* + * PCI-X Capability + */ +typedef struct _PCI_PCIX_CAPABILITY { + + PCI_CAPABILITIES_HEADER Header; + + USHORT Command; + ULONG Status; + +/* for Command: */ +} PCI_PCIX_CAPABILITY, *PPCI_PCIX_CAPABILITY; + +#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */ + +/* + * PCI-Express Capability + */ +typedef struct _PCI_PCIEXP_CAPABILITY { + + PCI_CAPABILITIES_HEADER Header; + + USHORT Flags; + ULONG DevCapabilities; + USHORT DevControl; + USHORT DevStatus; + ULONG LinkCapabilities; + USHORT LinkControl; + USHORT LinkStatus; + ULONG SlotCapabilities; + USHORT SlotControl; + USHORT SlotStatus; + USHORT RootControl; + USHORT RootCapabilities; + USHORT RootStatus; +} PCI_PCIEXP_CAPABILITY, *PPCI_PCIEXP_CAPABILITY; + +/* for DevControl: */ +#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ + +static NTSTATUS +__get_bus_ifc( + IN DEVICE_OBJECT* const pDevObj, + IN const GUID* const pGuid, + OUT BUS_INTERFACE_STANDARD *pBusIfc ); + +static void +__fixup_pci_capabilities( + IN PCI_COMMON_CONFIG* const pConfig ); + +static NTSTATUS +__save_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + OUT PCI_COMMON_CONFIG* const pConfig ); + +static NTSTATUS +__restore_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + IN PCI_COMMON_CONFIG* const pConfig ); + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, __get_bus_ifc) +#pragma alloc_text (PAGE, __fixup_pci_capabilities) +#pragma alloc_text (PAGE, __save_pci_config) +#pragma alloc_text (PAGE, __restore_pci_config) +#endif + + +/* Forwards the request to the HCA's PDO. */ +static NTSTATUS +__get_bus_ifc( + IN DEVICE_OBJECT* const pDevObj, + IN const GUID* const pGuid, + OUT BUS_INTERFACE_STANDARD *pBusIfc ) +{ + NTSTATUS status; + IRP *pIrp; + IO_STATUS_BLOCK ioStatus; + IO_STACK_LOCATION *pIoStack; + DEVICE_OBJECT *pDev; + KEVENT event; + + HCA_ENTER( HCA_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + pDev = IoGetAttachedDeviceReference( pDevObj ); + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Build the IRP for the HCA. */ + pIrp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, pDev, + NULL, 0, NULL, &event, &ioStatus ); + if( !pIrp ) + { + ObDereferenceObject( pDev ); + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IoBuildSynchronousFsdRequest failed.\n") ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Copy the request query parameters. 
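+	 * A hedged note: this is the standard WDM query-interface handshake.
+	 * The IRP's IoStatus.Status is primed to STATUS_NOT_SUPPORTED below so
+	 * that, if no driver in the stack handles IRP_MN_QUERY_INTERFACE, the
+	 * call fails cleanly instead of returning stale status.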
*/ + pIoStack = IoGetNextIrpStackLocation( pIrp ); + pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + pIoStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + pIoStack->Parameters.QueryInterface.Version = 1; + pIoStack->Parameters.QueryInterface.InterfaceType = pGuid; + pIoStack->Parameters.QueryInterface.Interface = (INTERFACE*)pBusIfc; + pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + + pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( pDev, pIrp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = ioStatus.Status; + } + ObDereferenceObject( pDev ); + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* + * Reads and saves the PCI configuration of the device accessible + * through the provided bus interface. Does not read registers 22 or 23 + * as directed in Tavor PRM 1.0.1, Appendix A. InfiniHost Software Reset. + */ +static NTSTATUS +__save_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + OUT PCI_COMMON_CONFIG* const pConfig ) +{ + ULONG len; + UINT32 *pBuf; + + HCA_ENTER( HCA_DBG_PNP ); + + pBuf = (UINT32*)pConfig; + + /* + * Read the lower portion of the configuration, up to but excluding + * register 22. + */ + len = pBusIfc->GetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[0], 0, 88 ); + if( len != 88 ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, ("Failed to read HCA config.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + /* Read the upper portion of the configuration, from register 24. */ + len = pBusIfc->GetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[24], 96, 160 ); + if( len != 160 ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to read HCA config.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +__fixup_pci_capabilities( + IN PCI_COMMON_CONFIG* const pConfig ) +{ + UCHAR *pBuf; + PCI_CAPABILITIES_HEADER *pHdr, *pNextHdr; + + HCA_ENTER( HCA_DBG_PNP ); + + pBuf = (UCHAR*)pConfig; + + if( pConfig->HeaderType == PCI_DEVICE_TYPE ) + { + if( pConfig->u.type0.CapabilitiesPtr ) + { + pNextHdr = (PCI_CAPABILITIES_HEADER*) + (pBuf + pConfig->u.type0.CapabilitiesPtr); + } + else + { + pNextHdr = NULL; + } + } + else + { + ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE ); + if( pConfig->u.type1.CapabilitiesPtr ) + { + pNextHdr = (PCI_CAPABILITIES_HEADER*) + (pBuf + pConfig->u.type1.CapabilitiesPtr); + } + else + { + pNextHdr = NULL; + } + } + + /* + * Fix up any fields that might cause changes to the + * device - like writing VPD data. + */ + while( pNextHdr ) + { + pHdr = pNextHdr; + if( pNextHdr->Next ) + pNextHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next); + else + pNextHdr = NULL; + + switch( pHdr->CapabilityID ) + { + case PCI_CAPABILITY_ID_VPD: + /* Clear the flags field so we don't cause a write. 
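+			 * Writing a saved VPD capability back with its flag bit set
+			 * could kick off a new VPD read/write cycle on the device, so
+			 * the saved image is sanitized before being restored.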
*/ + ((PCI_VPD_CAPABILITY*)pHdr)->Flags = 0; + break; + + default: + break; + } + } + + HCA_EXIT( HCA_DBG_PNP ); +} + + +#define PCI_CONFIG_OFFSET( field ) \ + offsetof( PCI_COMMON_CONFIG, field ) + +#define PCI_CONFIG_LEN( fromField, toField ) \ + offsetof( PCI_COMMON_CONFIG, toField ) - \ + offsetof( PCI_COMMON_CONFIG, fromField ) + \ + sizeof( ((PCI_COMMON_CONFIG*)NULL)->##toField ) + +#define PCI_CONFIG_WRITE( fromField, toField ) \ + pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, \ + &pConfig->##fromField, PCI_CONFIG_OFFSET( fromField ), \ + PCI_CONFIG_LEN( fromField, toField ) ) + +/* + * Restore saved PCI configuration, skipping registers 22 and 23, as well + * as any registers where writing will have side effects such as the flags + * field of the VPD and vendor specific capabilities. The function also delays + * writing the command register, bridge control register (if applicable), and + * PCIX command register (if present). + */ +static NTSTATUS +__restore_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + IN PCI_COMMON_CONFIG* const pConfig ) +{ + ULONG len; + UCHAR *pBuf; + + HCA_ENTER( HCA_DBG_PNP ); + + pBuf = (UCHAR*)pConfig; + + /* Fixup the capabilities as needed. */ + __fixup_pci_capabilities( pConfig ); + + /* Restore the vendor/device IDs */ + len = PCI_CONFIG_WRITE( VendorID, DeviceID ); + if( len != PCI_CONFIG_LEN( VendorID, DeviceID ) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write vendor/device IDs.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + /* + * Skip the command register and write the rest (except the bridge + * control if this is a bridge). + */ + if( pConfig->HeaderType == PCI_DEVICE_TYPE ) + { + len = PCI_CONFIG_WRITE( Status, u.type0.MaximumLatency ); + if( len != PCI_CONFIG_LEN( Status, u.type0.MaximumLatency ) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write type 0 common header.\n") ); + return STATUS_DEVICE_NOT_READY; + } + } + else + { + ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE ); + len = PCI_CONFIG_WRITE( Status, u.type1.InterruptPin ); + if( len != PCI_CONFIG_LEN( Status, u.type1.InterruptPin ) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write type 1 common header.\n") ); + return STATUS_DEVICE_NOT_READY; + } + } + + /* Write the capabilities back. */ + len = pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + pConfig->DeviceSpecific, PCI_CONFIG_OFFSET( DeviceSpecific ), 192 ); + if( len != 192 ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write capabilites.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + /* Write the command register. */ + len = PCI_CONFIG_WRITE( Command, Command ); + if( len != PCI_CONFIG_LEN( Command, Command ) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write command register.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + /* Write the bridge control register if a bridge. */ + if( pConfig->HeaderType == PCI_BRIDGE_TYPE ) + { + len = + PCI_CONFIG_WRITE( u.type1.BridgeControl, u.type1.BridgeControl ); + if( len != + PCI_CONFIG_LEN( u.type1.BridgeControl, u.type1.BridgeControl ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to write bridge control register.\n") ); + return STATUS_DEVICE_NOT_READY; + } + } + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +NTSTATUS +hca_reset( + IN DEVICE_OBJECT* const pDevObj ) +{ + NTSTATUS status; + PCI_COMMON_CONFIG hcaConfig, brConfig; + BUS_INTERFACE_STANDARD brBusIfc, hcaBusIfc; + hca_dev_ext_t *pExt; + ULONG data, i; + PULONG reset_p; + PHYSICAL_ADDRESS pa; + + HCA_ENTER( HCA_DBG_PNP ); + + /* Get the HCA's bus interface. 
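+	 * In outline, the reset below: saves the HCA's PCI configuration,
+	 * writes HCA_RESET_TOKEN to the reset register at HCA_RESET_HCR_OFFSET,
+	 * polls PCI config reads until the device stops returning 0xFFFFFFFF,
+	 * and finally restores the saved configuration.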
*/ + status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to get HCA bus interface.\n") ); + goto resetErr1; + } + +#ifdef WIN_TO_BE_CHANGED + //leo: not supported this way + /* Get the HCA Bridge's bus interface. */ + status = __get_bus_ifc( pDevObj, &GUID_HCA_BRIDGE_INTERFACE, &brBusIfc ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to get HCA bridge bus interface.\n") ); + goto resetErr2; + } +#endif + + /* Save the HCA's configuration. */ + status = __save_pci_config( &hcaBusIfc, &hcaConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to save HCA config.\n") ); + goto resetErr3; + } + +#ifdef WIN_TO_BE_CHANGED + //leo: not supported this way + /* Save the HCA bridge's configuration. */ + status = __save_pci_config( &brBusIfc, &brConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to save bridge config.\n") ); + goto resetErr3; + } +#endif + + /* map reset register */ + pExt = (hca_dev_ext_t*)pDevObj->DeviceExtension; + pa.QuadPart = pExt->bar[HCA_BAR_TYPE_HCR].phys + HCA_RESET_HCR_OFFSET; + reset_p = MmMapIoSpace( pa, 4, MmNonCached ); + if( !reset_p ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to map reset register with address 0x%I64x\n", pa.QuadPart) ); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } + + /* Issue the reset. */ + WRITE_REGISTER_ULONG( reset_p, HCA_RESET_TOKEN ); + + /* Wait a second. */ + cl_thread_suspend( 1000 ); + + /* unmap the reset register */ + MmUnmapIoSpace( reset_p, 4 ); + + +#ifdef WIN_TO_BE_CHANGED + //leo: not supported this way + /* + * Now read the bridge's configuration register until it doesn't + * return 0xFFFFFFFF. Give it 10 seconds for good measure. + */ + for( i = 0; i < 10; i++ ) + { + if( brBusIfc.GetBusData( brBusIfc.Context, PCI_WHICHSPACE_CONFIG, + &data, 0, sizeof(ULONG) ) != sizeof(ULONG) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to read bridge configuration data.\n") ); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } + /* See if we got valid data. */ + if( data != 0xFFFFFFFF ) + break; + + cl_thread_suspend( 1000 ); + } + if( i == 10 ) + { + /* Darn, timed out. :( */ + HCA_TRACE( HCA_DBG_ERROR, + ("Doh! HCA Bridge never came back from reset!\n") ); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } +#else + /* //TODO: ??? can we read HCA (and not bridge) for Tavor ???? + * Now read the HCA's configuration register until it doesn't + * return 0xFFFFFFFF. Give it 10 seconds for good measure. + */ + for( i = 0; i < 100; i++ ) + { + if( hcaBusIfc.GetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG, + &data, 0, sizeof(ULONG) ) != sizeof(ULONG) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to read HCA configuration data.\n") ); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } + /* See if we got valid data. */ + if( data != 0xFFFFFFFF ) + break; + + cl_thread_suspend( 100 ); + } + if( i >= 100 ) + { + /* Darn, timed out. :( */ + HCA_TRACE( HCA_DBG_ERROR, + ("Doh! HCA Bridge never came back from reset!\n") ); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } +#endif + + +#ifdef WIN_TO_BE_CHANGED + /* Restore the HCA's bridge configuration. */ + status = __restore_pci_config( &brBusIfc, &brConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to restore bridge config.\n") ); + goto resetErr3; + } +#endif + + /* Restore the HCA's configuration. 
*/
+	status = __restore_pci_config( &hcaBusIfc, &hcaConfig );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE( HCA_DBG_ERROR,
+			("Failed to restore HCA config.\n") );
+	}
+
+resetErr3:
+#ifdef WIN_TO_BE_CHANGED
+	brBusIfc.InterfaceDereference( brBusIfc.Context );
+
+resetErr2:
+#endif
+	hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );
+
+resetErr1:
+	HCA_EXIT( HCA_DBG_PNP );
+	return status;
+}
+
+
+/*
+ * Returns the offset in configuration space of the capability with the
+ * given ID (e.g. PCI-X or PCI-Express), or 0 if the capability is not
+ * present.
+ */
+static ULONG
+__FindCapability(
+	IN	PCI_COMMON_CONFIG* const	pConfig,
+	IN	char	cap_id
+	)
+{
+	ULONG	offset = 0;
+	UCHAR	*pBuf;
+	PCI_CAPABILITIES_HEADER	*pHdr;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	pBuf = (UCHAR*)pConfig;
+
+	ASSERT( pConfig->HeaderType == PCI_DEVICE_TYPE );
+
+	if( pConfig->u.type0.CapabilitiesPtr )
+	{
+		pHdr = (PCI_CAPABILITIES_HEADER*)
+			(pBuf + pConfig->u.type0.CapabilitiesPtr);
+	}
+	else
+	{
+		pHdr = NULL;
+	}
+
+	/*
+	 * Walk the capability list until we find the requested capability
+	 * ID or run off the end of the list.
+	 */
+	while( pHdr )
+	{
+		if( pHdr->CapabilityID == cap_id )
+		{
+			offset = (UCHAR)(((ULONG_PTR)pHdr) - ((ULONG_PTR)pConfig));
+			break;
+		}
+
+		if( pHdr->Next )
+			pHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);
+		else
+			pHdr = NULL;
+	}
+
+	HCA_EXIT( HCA_DBG_PNP );
+	return offset;
+}
+
+
+/*
+ * Tunes PCI configuration as described in 13.3.2 in the Tavor PRM.
+ */
+NTSTATUS
+hca_tune_pci(
+	IN				DEVICE_OBJECT* const		pDevObj )
+{
+	NTSTATUS		status;
+	PCI_COMMON_CONFIG	hcaConfig;
+	BUS_INTERFACE_STANDARD	hcaBusIfc;
+	ULONG		len;
+	ULONG		capOffset;
+	PCI_PCIX_CAPABILITY	*pPciXCap;
+	PCI_PCIEXP_CAPABILITY	*pPciExpCap;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	/* Get the HCA's bus interface. */
+	status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR, ("Failed to get HCA bus interface.\n") );
+		return status;
+	}
+
+	/* Save the HCA's configuration. */
+	status = __save_pci_config( &hcaBusIfc, &hcaConfig );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE( HCA_DBG_ERROR,
+			("Failed to save HCA config.\n") );
+		status = STATUS_UNSUCCESSFUL;
+		goto tweakErr;
+	}
+	status = 0;
+
+	/*
+	 * PCIX Capability
+	 */
+	capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIX );
+	if( capOffset )
+	{
+		pPciXCap = (PCI_PCIX_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);
+		/* Update the command field to max the read byte count if needed. */
+		if( (pPciXCap->Command & 0x000C) != 0x000C )
+		{
+			HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP,
+				("Updating max recv byte count of PCI-X capability.\n") );
+			pPciXCap->Command = (pPciXCap->Command & ~PCI_X_CMD_MAX_READ) | (3 << 2);
+			len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,
+				&pPciXCap->Command,
+				capOffset + offsetof( PCI_PCIX_CAPABILITY, Command),
+				sizeof( pPciXCap->Command ) );
+			if( len != sizeof( pPciXCap->Command ) )
+			{
+				HCA_TRACE( HCA_DBG_ERROR,
+					("Failed to update PCI-X maximum read byte count.\n") );
+				status = STATUS_UNSUCCESSFUL;
+				goto tweakErr;
+			}
+		}
+	}
+
+
+	/*
+	 * PCI Express Capability
+	 */
+	capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIEXP );
+	if( capOffset )
+	{
+		pPciExpCap = (PCI_PCIEXP_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);
+
+		/* Update Max_Read_Request_Size. 
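+		 * A note on the encodings used here and above, from the PCI specs:
+		 * for PCI-X, (3 << 2) sets the maximum memory read byte count field
+		 * (PCI_X_CMD_MAX_READ) to 4096 bytes; for PCI Express, (5 << 12)
+		 * sets Max_Read_Request_Size (PCI_EXP_DEVCTL_READRQ, bits 14:12 of
+		 * DevControl) to 4096 bytes as well.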
*/ + HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP, + ("Updating max recv byte count of PCI-X capability.\n") ); + pPciExpCap->DevControl = (pPciExpCap->DevControl & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12); + len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG, + &pPciExpCap->DevControl, + capOffset + offsetof( PCI_PCIEXP_CAPABILITY, DevControl), + sizeof( pPciExpCap->DevControl ) ); + if( len != sizeof( pPciExpCap->DevControl ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to update PCI-Exp maximum read byte count.\n") ); + goto tweakErr; + } + } + + +tweakErr: + hcaBusIfc.InterfaceDereference( hcaBusIfc.Context ); + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* leo */ + +NTSTATUS +hca_enable_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT PCI_COMMON_CONFIG* pHcaConfig + ) +{ + NTSTATUS status; + BUS_INTERFACE_STANDARD hcaBusIfc; + PCI_PCIX_CAPABILITY *pPciXCap; + ULONG len; + + HCA_ENTER( HCA_DBG_PNP ); + + /* Get the HCA's bus interface. */ + status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, ("Failed to get HCA bus interface.\n") ); + return STATUS_DEVICE_NOT_READY; + } + + /* Save the HCA's configuration. */ + status = __save_pci_config( &hcaBusIfc, pHcaConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to save HCA config.\n") ); + goto pciErr; + } + + /* fix command register (set PCI Master bit) */ + // NOTE: we change here the saved value of the command register + pHcaConfig->Command |= 7; + len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&pHcaConfig->Command , 4, sizeof(ULONG) ); + if( len != sizeof(ULONG) ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to write command register.\n") ); + status = STATUS_DEVICE_NOT_READY; + goto pciErr; + } + + pciErr: + hcaBusIfc.InterfaceDereference( hcaBusIfc.Context ); + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_pci.h b/branches/MTHCA/hw/mthca/kernel/hca_pci.h new file mode 100644 index 00000000..5c5c3f90 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_pci.h @@ -0,0 +1,19 @@ +#ifndef HCI_PCI_H +#define HCI_PCI_H + + +NTSTATUS +hca_reset( + IN DEVICE_OBJECT* const pDevObj ); + +NTSTATUS +hca_enable_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT PCI_COMMON_CONFIG* pHcaConfig + ); + +NTSTATUS +hca_tune_pci( + IN DEVICE_OBJECT* const pDevObj ); + +#endif diff --git a/branches/MTHCA/hw/mthca/kernel/hca_pnp.c b/branches/MTHCA/hw/mthca/kernel/hca_pnp.c new file mode 100644 index 00000000..a54f7370 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_pnp.c @@ -0,0 +1,1473 @@ +/* BEGIN_ICS_COPYRIGHT **************************************** +** END_ICS_COPYRIGHT ****************************************/ + +/* + $Revision: 1.1 $ +*/ + + +/* + * Provides the driver entry points for the Tavor VPD. 
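+ * An overview, added editorially: the device extension walks a simple
+ * lifecycle - HCA_ADDED after hca_add_device, HCA_STARTED once resources
+ * are mapped and mthca_init_one has run, and HCA_REGISTERED once the CA
+ * is registered with the access layer (AL) through its CI interface.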
+ */ + + +#include +#include +#ifdef WIN_TO_BE_CHANGED +#include +#endif +#include "mthca.h" + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_cancel_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static void +hca_release_resources( + IN DEVICE_OBJECT* const pDevObj ); + +static NTSTATUS +hca_cancel_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_surprise_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_capabilities( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_pnp_state( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_removal_relations( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_query_power( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__get_ci_interface( + IN DEVICE_OBJECT* const pDevObj ); + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *pDevObj ); + +static NTSTATUS +__pnp_notify_target( + IN void *pNotifyStruct, + IN void *context ); + +static NTSTATUS +__pnp_notify_ifc( + IN void *pNotifyStruct, + IN void *context ); + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, hca_add_device) +#pragma alloc_text (PAGE, hca_start) +#pragma alloc_text (PAGE, hca_query_stop) +#pragma alloc_text (PAGE, hca_stop) +#pragma alloc_text (PAGE, hca_cancel_stop) +#pragma alloc_text (PAGE, hca_query_remove) +#pragma alloc_text (PAGE, hca_release_resources) +#pragma alloc_text (PAGE, hca_cancel_remove) +#pragma alloc_text (PAGE, hca_surprise_remove) +#pragma alloc_text (PAGE, hca_query_capabilities) +#pragma alloc_text (PAGE, hca_query_pnp_state) +#pragma alloc_text (PAGE, hca_query_bus_relations) +#pragma alloc_text (PAGE, hca_query_removal_relations) +#pragma alloc_text (PAGE, hca_set_power) +#pragma alloc_text (PAGE, __alloc_hca_ifc) +#pragma alloc_text (PAGE, __get_ci_interface) +#pragma alloc_text (PAGE, __hca_register) +#pragma alloc_text (PAGE, __pnp_notify_target) +#pragma alloc_text (PAGE, __pnp_notify_ifc) +#endif + + +static cl_vfptr_pnp_po_t vfptrHcaPnp; + + +void +hca_init_vfptr( void ) +{ + vfptrHcaPnp.identity = "HCA driver"; + vfptrHcaPnp.pfn_start = hca_start; + vfptrHcaPnp.pfn_query_stop = hca_query_stop; + vfptrHcaPnp.pfn_stop = hca_stop; + vfptrHcaPnp.pfn_cancel_stop = hca_cancel_stop; + vfptrHcaPnp.pfn_query_remove = 
hca_query_remove;
+	vfptrHcaPnp.pfn_release_resources = hca_release_resources;
+	vfptrHcaPnp.pfn_remove = cl_do_remove;
+	vfptrHcaPnp.pfn_cancel_remove = hca_cancel_remove;
+	vfptrHcaPnp.pfn_surprise_remove = hca_surprise_remove;
+	vfptrHcaPnp.pfn_query_capabilities = hca_query_capabilities;
+	vfptrHcaPnp.pfn_query_pnp_state = hca_query_pnp_state;
+	vfptrHcaPnp.pfn_filter_res_req = cl_irp_skip;
+	vfptrHcaPnp.pfn_dev_usage_notification = cl_do_sync_pnp;
+	vfptrHcaPnp.pfn_query_bus_relations = hca_query_bus_relations;
+	vfptrHcaPnp.pfn_query_ejection_relations = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_removal_relations = hca_query_removal_relations;
+	vfptrHcaPnp.pfn_query_target_relations = cl_irp_ignore;
+	vfptrHcaPnp.pfn_unknown = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_resources = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_res_req = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_bus_info = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_interface = cl_irp_ignore;
+	vfptrHcaPnp.pfn_read_config = cl_irp_ignore;
+	vfptrHcaPnp.pfn_write_config = cl_irp_ignore;
+	vfptrHcaPnp.pfn_eject = cl_irp_ignore;
+	vfptrHcaPnp.pfn_set_lock = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_power = hca_query_power;
+	vfptrHcaPnp.pfn_set_power = hca_set_power;
+	vfptrHcaPnp.pfn_power_sequence = cl_irp_ignore;
+	vfptrHcaPnp.pfn_wait_wake = cl_irp_ignore;
+}
+
+
+NTSTATUS
+hca_add_device(
+	IN	PDRIVER_OBJECT	pDriverObj,
+	IN	PDEVICE_OBJECT	pPdo )
+{
+	NTSTATUS	status;
+	DEVICE_OBJECT	*pDevObj, *pNextDevObj;
+	hca_dev_ext_t	*p_ext;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	/*
+	 * Create the device so that we have a device extension to store stuff in.
+	 */
+	status = IoCreateDevice( pDriverObj, sizeof(hca_dev_ext_t),
+		NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,
+		FALSE, &pDevObj );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("IoCreateDevice returned 0x%08X.\n", status) );
+		return status;
+	}
+
+	p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+	cl_memclr( p_ext, sizeof(hca_dev_ext_t) );
+
+	/* Attach to the device stack. */
+	pNextDevObj = IoAttachDeviceToDeviceStack( pDevObj, pPdo );
+	if( !pNextDevObj )
+	{
+		//cl_event_destroy( &p_ext->mutex );
+		IoDeleteDevice( pDevObj );
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("IoAttachDeviceToDeviceStack failed.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	/* Initialize the complib extension. */
+	cl_init_pnp_po_ext( pDevObj, pNextDevObj, pPdo, g_mlnx_dbg_lvl,
+		&vfptrHcaPnp, NULL );
+
+	p_ext->state = HCA_ADDED;
+
+	HCA_EXIT( HCA_DBG_PNP );
+	return status;
+}
+
+
+static NTSTATUS
+__get_ci_interface(
+	IN	DEVICE_OBJECT* const	pDevObj )
+{
+	NTSTATUS	status;
+	IRP	*pIrp;
+	hca_dev_ext_t	*p_ext;
+	IO_STATUS_BLOCK	ioStatus;
+	IO_STACK_LOCATION	*pIoStack;
+	KEVENT	event;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+
+	KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+	/* Query for the verbs interface. */
+	pIrp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev,
+		NULL, 0, NULL, &event, &ioStatus );
+	if( !pIrp )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("IoBuildSynchronousFsdRequest failed.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	/* Format the IRP. 
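+	 * The query names GUID_IB_CI_INTERFACE at IB_CI_INTERFACE_VERSION;
+	 * on success the AL fills p_ext->ci_ifc with its channel-interface
+	 * table, whose register_ca entry point is used by __hca_register
+	 * below.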
*/ + pIoStack = IoGetNextIrpStackLocation( pIrp ); + pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + pIoStack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION; + pIoStack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t); + pIoStack->Parameters.QueryInterface.Interface = + (INTERFACE*)&p_ext->ci_ifc; + pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + pIoStack->Parameters.QueryInterface.InterfaceType = + &GUID_IB_CI_INTERFACE; + pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( p_ext->p_al_dev, pIrp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = ioStatus.Status; + } + + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Query interface for verbs returned %08x.\n", status) ); + return status; + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +__pnp_notify_target( + IN void *pNotifyStruct, + IN void *context ) +{ + NTSTATUS status = STATUS_SUCCESS; + DEVICE_OBJECT *pDevObj; + hca_dev_ext_t *p_ext; + TARGET_DEVICE_REMOVAL_NOTIFICATION *pNotify; + + HCA_ENTER( HCA_DBG_PNP ); + + pNotify = (TARGET_DEVICE_REMOVAL_NOTIFICATION*)pNotifyStruct; + pDevObj = (DEVICE_OBJECT*)context; + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + if( IsEqualGUID( &pNotify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) ) + { + if( p_ext->state == HCA_REGISTERED ) + { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_STARTED; + } + + /* Release AL's file object so that it can unload. */ + CL_ASSERT( p_ext->p_al_file_obj ); + CL_ASSERT( p_ext->p_al_file_obj == pNotify->FileObject ); + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + else if( IsEqualGUID( &pNotify->Event, + &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) ) + { + if( p_ext->state == HCA_REGISTERED ) + { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_STARTED; + } + + /* Release AL's file object so that it can unload. */ + if( p_ext->p_al_file_obj ) + { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + + /* Cancel our target device change registration. */ + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + } + else if( IsEqualGUID( &pNotify->Event, + &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) ) + { + /* Cancel our target device change registration. */ + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + + /* Get the device object pointer for the AL. 
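+		 * A cancelled query-remove undoes the teardown above: re-take a
+		 * reference on AL's file object, re-register for target device
+		 * change notifications, and re-register the CA with the AL.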
 */
+		CL_ASSERT( !p_ext->p_al_file_obj );
+		CL_ASSERT( !p_ext->p_al_dev );
+		p_ext->p_al_file_obj = pNotify->FileObject;
+		p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj );
+
+		status = IoRegisterPlugPlayNotification(
+			EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj,
+			pDevObj->DriverObject, __pnp_notify_target, pDevObj,
+			&p_ext->pnp_target_entry );
+		if( !NT_SUCCESS( status ) )
+		{
+			HCA_TRACE_EXIT( HCA_DBG_ERROR,
+				("IoRegisterPlugPlayNotification returned %08x.\n", status) );
+			return status;
+		}
+
+		__hca_register( pDevObj );
+	}
+
+	HCA_EXIT( HCA_DBG_PNP );
+	return status;
+}
+
+
+static ci_interface_t*
+__alloc_hca_ifc(
+	IN	hca_dev_ext_t* const	p_ext )
+{
+	ci_interface_t	*pIfc;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	pIfc =
+		(ci_interface_t*)ExAllocatePool( PagedPool, sizeof(ci_interface_t) );
+	if( !pIfc )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("Failed to allocate ci_interface_t (%d bytes).\n",
+			sizeof(ci_interface_t)) );
+		return NULL;
+	}
+
+	setup_ci_interface( p_ext->hca.guid, pIfc );
+
+	pIfc->p_hca_dev = p_ext->cl_ext.p_pdo;
+	pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;
+	pIfc->dev_id = (uint16_t)p_ext->hcaConfig.DeviceID;
+	pIfc->dev_revision = (uint16_t)p_ext->hca.hw_ver;
+
+	HCA_EXIT( HCA_DBG_PNP );
+	return pIfc;
+}
+
+
+static NTSTATUS
+__hca_register(
+	IN	DEVICE_OBJECT	*pDevObj )
+{
+	hca_dev_ext_t	*p_ext;
+	NTSTATUS	status;
+	ib_api_status_t	ib_status;
+	ci_interface_t	*p_hca_ifc;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+
+	ASSERT( p_ext->state == HCA_STARTED );
+	ASSERT( p_ext->p_al_dev );
+
+	/* Get the AL's lower interface. */
+	status = __get_ci_interface( pDevObj );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_TRACE( HCA_DBG_ERROR,
+			("__get_ci_interface returned %08x.\n", status) );
+		return status;
+	}
+
+	/* Allocate and populate our HCA interface structure. */
+	p_hca_ifc = __alloc_hca_ifc( p_ext );
+	if( !p_hca_ifc )
+	{
+		HCA_TRACE( HCA_DBG_ERROR, ("__alloc_hca_ifc failed.\n") );
+		return STATUS_NO_MEMORY;
+	}
+
+	/* Notify AL that we're available... */
+	ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc );
+	ExFreePool( p_hca_ifc );
+	if( ib_status != IB_SUCCESS )
+	{
+		p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	p_ext->state = HCA_REGISTERED;
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+__pnp_notify_ifc(
+	IN	void	*pNotifyStruct,
+	IN	void	*context )
+{
+	NTSTATUS	status;
+	DEVICE_OBJECT	*pDevObj;
+	hca_dev_ext_t	*p_ext;
+	DEVICE_INTERFACE_CHANGE_NOTIFICATION	*pNotify;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	pNotify = (DEVICE_INTERFACE_CHANGE_NOTIFICATION*)pNotifyStruct;
+	pDevObj = (DEVICE_OBJECT*)context;
+	p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+
+	if( !IsEqualGUID( &pNotify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) )
+	{
+		HCA_EXIT( HCA_DBG_PNP );
+		return STATUS_SUCCESS;
+	}
+
+	/*
+	 * Sanity check.  We should only be getting notifications of the
+	 * CI interface exported by AL.
+	 */
+	ASSERT(
+		IsEqualGUID( &pNotify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) );
+
+	if( p_ext->state != HCA_STARTED )
+	{
+		HCA_TRACE( HCA_DBG_ERROR, ("Invalid state: %d\n", p_ext->state) );
+		return STATUS_SUCCESS;
+	}
+
+	ASSERT( !p_ext->p_al_dev );
+	ASSERT( !p_ext->p_al_file_obj );
+
+	/* Get the AL device object. 
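+	 * IoGetDeviceObjectPointer also takes a reference on AL's file
+	 * object; holding that reference pins AL in memory until it is
+	 * released in __pnp_notify_target or hca_release_resources.
+	 */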
*/ + HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") ); + status = IoGetDeviceObjectPointer( pNotify->SymbolicLinkName, + FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("IoGetDeviceObjectPointer returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + /* Register for removal notification of the IB Fabric root device. */ + HCA_TRACE( HCA_DBG_PNP, + ("Registering for target notifications.\n") ); + status = IoRegisterPlugPlayNotification( + EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, + pDevObj->DriverObject, __pnp_notify_target, pDevObj, + &p_ext->pnp_target_entry ); + if( !NT_SUCCESS( status ) ) + { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + HCA_TRACE( HCA_DBG_ERROR, + ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + status = __hca_register( pDevObj ); + if( !NT_SUCCESS( status ) ) + { + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + HCA_TRACE( HCA_DBG_ERROR, + ("__get_ci_interface returned %08x.\n", status) ); + return STATUS_SUCCESS; + } + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +/* + * Walk the resource lists and store the information. The write-only + * flag is not set for the UAR region, so it is indistinguishable from the + * DDR region since both are prefetchable. The code here assumes that the + * resources get handed in order - HCR, UAR, DDR. + * - Configuration Space: not prefetchable, read/write + * - UAR space: prefetchable, write only. + * - DDR: prefetchable, read/write. + */ +static NTSTATUS +__SetupHcaResources( + IN DEVICE_OBJECT* const pDevObj, + IN CM_RESOURCE_LIST* const pHcaResList, + IN CM_RESOURCE_LIST* const pHostResList ) +{ + NTSTATUS status = STATUS_SUCCESS; + hca_dev_ext_t *p_ext; + USHORT i; + hca_bar_type_t type = HCA_BAR_TYPE_HCR; + + CM_PARTIAL_RESOURCE_DESCRIPTOR *pHcaRes, *pHostRes; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + ASSERT( pHostResList->List[0].PartialResourceList.Version == 1 ); + ASSERT( pHostResList->List[0].PartialResourceList.Revision == 1 ); + + for( i = 0; i < pHostResList->List[0].PartialResourceList.Count; i++ ) + { + pHcaRes = + &pHcaResList->List[0].PartialResourceList.PartialDescriptors[i]; + pHostRes = + &pHostResList->List[0].PartialResourceList.PartialDescriptors[i]; + + /* + * Save the interrupt information so that we can power the device + * up and down. Since the device will lose state when powered down + * we have to fully disable it. Note that we can leave memory mapped + * resources in place when powered down as the resource assignments + * won't change. However, we must disconnect our interrupt, and + * reconnect it when powering up. + */ + if( pHcaRes->Type == CmResourceTypeInterrupt ) + { + p_ext->interruptInfo = *pHostRes; + continue; + } + + if( pHcaRes->Type != CmResourceTypeMemory ) + continue; + + /* + * Sanity check that our assumption on how resources + * are reported hold. 
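+		 * In particular, the HCR BAR must be the first memory resource
+		 * and must not be prefetchable; if the first memory resource is
+		 * prefetchable, the assumed HCR/UAR/DDR ordering does not hold
+		 * and we fail the start rather than map the wrong window.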
+ */ + if( type == HCA_BAR_TYPE_HCR && + (pHcaRes->Flags & CM_RESOURCE_MEMORY_PREFETCHABLE) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("First memory resource is prefetchable - expected HCR.\n") ); + status = STATUS_UNSUCCESSFUL; + break; + } + + p_ext->bar[type].phys = pHcaRes->u.Memory.Start.QuadPart; + p_ext->bar[type].size = pHcaRes->u.Memory.Length; +#ifdef MAP_ALL_HCA_MEMORY + /*leo: no need to map all the resources */ + p_ext->bar[type].virt = MmMapIoSpace( pHostRes->u.Memory.Start, + pHostRes->u.Memory.Length, MmNonCached ); + if( !p_ext->bar[type].virt ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("Failed to map memory resource type %d\n", type) ); + status = STATUS_UNSUCCESSFUL; + break; + } +#else + p_ext->bar[type].virt = NULL; +#endif + + type++; + } + + if( type == HCA_BAR_TYPE_DDR) + { + p_ext->hca_hidden = 1; + } + else + if( type != HCA_BAR_TYPE_MAX ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Failed to map all memory resources.\n") ); + status = STATUS_UNSUCCESSFUL; + } + + if( p_ext->interruptInfo.Type != CmResourceTypeInterrupt ) + { + HCA_TRACE( HCA_DBG_ERROR, ("No interrupt resource.\n") ); + status = STATUS_UNSUCCESSFUL; + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static void +__UnmapHcaMemoryResources( + IN DEVICE_OBJECT* const pDevObj ) +{ + hca_dev_ext_t *p_ext; + USHORT i; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + for( i = 0; i < HCA_BAR_TYPE_MAX; i++ ) + { + if( p_ext->bar[i].virt ) + { + MmUnmapIoSpace( p_ext->bar[i].virt, p_ext->bar[i].size ); + cl_memclr( &p_ext->bar[i], sizeof(hca_bar_t) ); + } + } + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + POWER_STATE powerState; + DEVICE_DESCRIPTION devDesc; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + /* Handled on the way up. */ + status = cl_do_sync_pnp( pDevObj, pIrp, pAction ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("Lower drivers failed IRP_MN_START_DEVICE.\n") ); + return status; + } + + pIoStack = IoGetCurrentIrpStackLocation( pIrp ); + + /* + * Walk the resource lists and store the information. The write-only + * flag is not set for the UAR region, so it is indistinguishable from the + * DDR region since both are prefetchable. The code here assumes that the + * resources get handed in order - HCR, UAR, DDR. + * - Configuration Space: not prefetchable, read/write + * - UAR space: prefetchable, write only. + * - DDR: prefetchable, read/write. + */ + status = __SetupHcaResources( pDevObj, + pIoStack->Parameters.StartDevice.AllocatedResources, + pIoStack->Parameters.StartDevice.AllocatedResourcesTranslated ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("__ProcessResources returned %08X.\n", status) ); + return status; + } + + /* save PCI configuration info and enable device */ + hca_enable_pci( pDevObj, &p_ext->hcaConfig ); + + /* + * Get the DMA adapter representing the HCA so we can + * allocate common buffers. + */ + RtlZeroMemory( &devDesc, sizeof(devDesc) ); + devDesc.Version = DEVICE_DESCRIPTION_VERSION2; + devDesc.Master = TRUE; + devDesc.ScatterGather = TRUE; + devDesc.Dma64BitAddresses = TRUE; + devDesc.InterfaceType = PCIBus; + + //TODO: what about Arbel ? Has it the same limit ? Is it the right place to call IoGetDmaAdapter ? + /* Tavor has a limit of 2GB for data transfer lengths. 
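+	 * (0x80000000 bytes = 2GB.) The DMA_ADAPTER obtained below is used
+	 * for common-buffer allocation and is released via PutDmaAdapter in
+	 * hca_release_resources.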
*/
+ devDesc.MaximumLength = 0x80000000;
+
+ p_ext->p_dma_adapter = IoGetDmaAdapter(
+ p_ext->cl_ext.p_pdo, &devDesc, &p_ext->n_map_regs );
+ if( !p_ext->p_dma_adapter )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Failed to get DMA_ADAPTER for HCA.\n") );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize the HCA now. */
+ status = mthca_init_one( p_ext );
+ if( !NT_SUCCESS( status ) )
+ {
+ //TODO: no cleanup on error
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("mthca_init_one returned %08X\n", status) );
+ return status;
+ }
+
+ /*
+ * Change the state since the PnP callback can happen
+ * before the callback returns.
+ */
+ p_ext->state = HCA_STARTED;
+
+ /*leo: get node GUID */
+ {
+ int err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );
+ if (err) {
+ //TODO: no cleanup on error
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("can't get node GUID - mthca_get_dev_info() failed (%08X)\n", err) );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+ }
+
+ /* queue HCA */
+ mlnx_hca_insert( &p_ext->hca );
+
+ /* Register for interface arrival of the IB_AL device. */
+ status = IoRegisterPlugPlayNotification(
+ EventCategoryDeviceInterfaceChange,
+ PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES,
+ (void*)&GUID_IB_CI_INTERFACE, pDevObj->DriverObject,
+ __pnp_notify_ifc, pDevObj, &p_ext->pnp_ifc_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ p_ext->state = HCA_ADDED;
+ HCA_TRACE( HCA_DBG_ERROR,
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status) );
+ }
+
+ /* We get started fully powered. */
+ p_ext->PowerState = PowerDeviceD0;
+ powerState.DeviceState = PowerDeviceD0;
+ PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static void
+hca_release_resources(
+ IN DEVICE_OBJECT* const pDevObj )
+{
+ hca_dev_ext_t *p_ext;
+ POWER_STATE powerState;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ CL_ASSERT( p_ext->ci_ifc.deregister_ca );
+ CL_ASSERT( p_ext->p_al_dev );
+ CL_ASSERT( p_ext->p_al_file_obj );
+ /* Notify AL that the CA is being removed. */
+ p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ }
+
+ if( p_ext->pnp_target_entry )
+ {
+ ASSERT( p_ext->pnp_ifc_entry );
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ }
+
+ if( p_ext->pnp_ifc_entry )
+ IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry );
+
+ if( p_ext->p_al_file_obj )
+ ObDereferenceObject( p_ext->p_al_file_obj );
+
+ mthca_remove_one( p_ext );
+
+ if( p_ext->p_dma_adapter )
+ p_ext->p_dma_adapter->DmaOperations->PutDmaAdapter( p_ext->p_dma_adapter );
+
+ //cl_event_destroy( &p_ext->mutex );
+ __UnmapHcaMemoryResources( pDevObj );
+
+ /* Notify the power manager that the device is powered down. */
+ powerState.DeviceState = PowerDeviceD3;
+ PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );
+
+ /* Clear the PnP state in case we get restarted. 
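+ * The pnpState field accumulates PNP_DEVICE_* flags (e.g.
+ * PNP_DEVICE_FAILED, set by the power-up path further below);
+ * hca_query_pnp_state() reports them back to the PnP manager by ORing
+ * them into the IRP_MN_QUERY_PNP_DEVICE_STATE result, roughly:
+ *
+ * pIrp->IoStatus.Information |= p_ext->pnpState;
+ *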
*/ + p_ext->pnpState = 0; + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static NTSTATUS +hca_query_removal_relations( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + if( p_ext->state == HCA_REGISTERED ) + { + status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, pIrp ); + if( !NT_SUCCESS( status ) ) + { + *pAction = IrpComplete; + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("AL get_relations returned %08x.\n", status) ); + return status; + } + } + + *pAction = IrpPassDown; + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + return cl_irp_skip( pDevObj, pIrp, pAction ); + //NTSTATUS status; + //hca_dev_ext_t *p_ext; + + //HCA_ENTER( HCA_DBG_PNP ); + + //p_ext = pDevObj->DeviceExtension; + + //if( p_ext->state == HCA_REGISTERED ) + //{ + // status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, pIrp ); + // if( !NT_SUCCESS( status ) ) + // { + // *pAction = IrpComplete; + // HCA_TRACE_EXIT( HCA_DBG_ERROR, + // ("AL get_relations returned %08x.\n", status) ); + // return status; + // } + //} + + //*pAction = IrpPassDown; + //HCA_EXIT( HCA_DBG_PNP ); + //return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_query_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* All kernel clients will get notified through the device hierarchy. */ + + /* TODO: set a flag to fail creation of any new IB resources. */ + return cl_irp_skip( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* + * Must disable everything. Complib framework will + * call ReleaseResources handler. + */ + return cl_irp_skip( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_cancel_stop( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* Handled on the way up. */ + return cl_do_sync_pnp( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_query_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* Query remove always succeeds. */ + /* TODO: set a flag to fail creation of any new IB resources. */ + return cl_irp_skip( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_cancel_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* Handled on the way up. */ + return cl_do_sync_pnp( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_surprise_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + /* + * TODO: Set state so that all further requests + * automatically succeed/fail as needed. + */ + return cl_irp_skip( pDevObj, pIrp, pAction ); +} + + +static NTSTATUS +hca_query_capabilities( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + DEVICE_CAPABILITIES *pCaps; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + /* Process on the way up. 
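+ * (The underlying bus driver fills in DEVICE_CAPABILITIES, so we must
+ * let lower drivers complete the IRP before reading it.) The
+ * DeviceState array captured below is the S-state to D-state mapping
+ * that __SystemPowerCompletion later uses, roughly:
+ *
+ * state.DeviceState =
+ * p_ext->DevicePower[pIoStack->Parameters.Power.State.SystemState];
+ *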
*/
+ status = cl_do_sync_pnp( pDevObj, pIrp, pAction );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("cl_do_sync_pnp returned %08X.\n", status) );
+ return status;
+ }
+
+ pIoStack = IoGetCurrentIrpStackLocation( pIrp );
+ pCaps = pIoStack->Parameters.DeviceCapabilities.Capabilities;
+
+ /*
+ * Store the device power mapping into our extension since we're
+ * the power policy owner. The mapping is used when handling
+ * IRP_MN_SET_POWER IRPs.
+ */
+ cl_memcpy(
+ p_ext->DevicePower, pCaps->DeviceState, sizeof(p_ext->DevicePower) );
+
+ if( pCaps->DeviceD1 )
+ {
+ HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP,
+ ("WARNING: Device reports support for DeviceD1 power state.\n") );
+ pCaps->DeviceD1 = FALSE;
+ }
+
+ if( pCaps->DeviceD2 )
+ {
+ HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP,
+ ("WARNING: Device reports support for DeviceD2 power state.\n") );
+ pCaps->DeviceD2 = FALSE;
+ }
+
+ if( pCaps->SystemWake != PowerSystemUnspecified )
+ {
+ HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP,
+ ("WARNING: Device reports support for system wake.\n") );
+ pCaps->SystemWake = PowerSystemUnspecified;
+ }
+
+ if( pCaps->DeviceWake != PowerDeviceUnspecified )
+ {
+ HCA_TRACE( HCA_DBG_WARN | HCA_DBG_PNP,
+ ("WARNING: Device reports support for device wake.\n") );
+ pCaps->DeviceWake = PowerDeviceUnspecified;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static NTSTATUS
+hca_query_pnp_state(
+ IN DEVICE_OBJECT* const pDevObj,
+ IN IRP* const pIrp,
+ OUT cl_irp_action_t* const pAction )
+{
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+
+ pIrp->IoStatus.Information |= p_ext->pnpState;
+
+ *pAction = IrpSkip;
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+hca_query_power(
+ IN DEVICE_OBJECT* const pDevObj,
+ IN IRP* const pIrp,
+ OUT cl_irp_action_t* const pAction )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ IO_STACK_LOCATION *pIoStack;
+
+ HCA_ENTER( HCA_DBG_PO );
+
+ UNUSED_PARAM( pDevObj );
+
+ pIoStack = IoGetCurrentIrpStackLocation( pIrp );
+
+ switch( pIoStack->Parameters.Power.Type )
+ {
+ case SystemPowerState:
+ /* Fail any requests to hibernate or sleep the system. */
+ switch( pIoStack->Parameters.Power.State.SystemState )
+ {
+ case PowerSystemWorking:
+ case PowerSystemShutdown:
+ /* We only support fully working and shutdown system states. */
+ break;
+
+ default:
+ status = STATUS_NOT_SUPPORTED;
+ }
+ break;
+
+ case DevicePowerState:
+ /* Fail any query for low power states. */
+ switch( pIoStack->Parameters.Power.State.DeviceState )
+ {
+ case PowerDeviceD0:
+ case PowerDeviceD3:
+ /* We only support fully powered or off power states. */
+ break;
+
+ default:
+ status = STATUS_NOT_SUPPORTED;
+ }
+ break;
+ }
+
+ if( status == STATUS_NOT_SUPPORTED )
+ *pAction = IrpComplete;
+ else
+ *pAction = IrpSkip;
+
+ HCA_EXIT( HCA_DBG_PO );
+ return status;
+}
+
+
+static void
+__RequestPowerCompletion(
+ IN DEVICE_OBJECT *pDevObj,
+ IN UCHAR minorFunction,
+ IN POWER_STATE powerState,
+ IN void *context,
+ IN IO_STATUS_BLOCK *pIoStatus )
+{
+ IRP *pIrp;
+ cl_pnp_po_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PO );
+
+ UNUSED_PARAM( minorFunction );
+ UNUSED_PARAM( powerState );
+
+ pIrp = (IRP*)context;
+ p_ext = (cl_pnp_po_ext_t*)pDevObj->DeviceExtension;
+
+ /* Propagate the device IRP status to the system IRP status. */
+ pIrp->IoStatus.Status = pIoStatus->Status;
+
+ /* Continue Power IRP processing. 
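+ * The ordering below follows the standard WDM pattern: signal the
+ * power manager with PoStartNextPowerIrp() before completing the IRP,
+ * and release the remove lock last so the device cannot be removed
+ * while the IRP is still in flight.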
*/ + PoStartNextPowerIrp( pIrp ); + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->remove_lock, pIrp ); + HCA_EXIT( HCA_DBG_PO ); +} + + +/*NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__SystemPowerCompletion( + IN DEVICE_OBJECT *pDevObj, + IN IRP *pIrp, + IN void *context ) +{ + NTSTATUS status; + POWER_STATE state; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + + HCA_ENTER( HCA_DBG_PO ); + + UNUSED_PARAM( context ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + pIoStack = IoGetCurrentIrpStackLocation( pIrp ); + + if( !NT_SUCCESS( pIrp->IoStatus.Status ) ) + { + PoStartNextPowerIrp( pIrp ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp ); + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n", + pIrp->IoStatus.Status) ); + return STATUS_SUCCESS; + } + + state.DeviceState = + p_ext->DevicePower[pIoStack->Parameters.Power.State.SystemState]; + + /* + * Send a device power IRP to our devnode. Using our device object will + * only work on win2k and other NT based systems. + */ + status = PoRequestPowerIrp( pDevObj, IRP_MN_SET_POWER, state, + __RequestPowerCompletion, pIrp, NULL ); + + if( !NT_SUCCESS( pIrp->IoStatus.Status ) ) + { + PoStartNextPowerIrp( pIrp ); + /* Propagate the failure. */ + pIrp->IoStatus.Status = status; + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp ); + HCA_TRACE( HCA_DBG_ERROR, + ("PoRequestPowerIrp returned %08x.\n", status) ); + } + + HCA_EXIT( HCA_DBG_PO ); + return STATUS_MORE_PROCESSING_REQUIRED; +} + + +/* Work item callback to handle DevicePowerD0 IRPs at passive level. */ +static void +__PowerUpCb( + IN DEVICE_OBJECT* pDevObj, + IN void* context ) +{ + NTSTATUS status; + IO_STACK_LOCATION *pIoStack; + hca_dev_ext_t *p_ext; + IRP *pIrp; + + HCA_ENTER( HCA_DBG_PO ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + pIrp = (IRP*)context; + pIoStack = IoGetCurrentIrpStackLocation( pIrp ); + + IoFreeWorkItem( p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = NULL; + + status = mthca_init_one( p_ext ); + if( !NT_SUCCESS( status ) ) + goto done; + + if( p_ext->p_al_dev ) + status = __hca_register( pDevObj ); + +done: + if( !NT_SUCCESS( status ) ) + { + /* Flag device as having failed. */ + p_ext->pnpState |= PNP_DEVICE_FAILED; + IoInvalidateDeviceState( p_ext->cl_ext.p_pdo ); + } + + PoStartNextPowerIrp( pIrp ); + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp ); + + HCA_EXIT( HCA_DBG_PO ); +} + + +/*NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__DevicePowerCompletion( + IN DEVICE_OBJECT *pDevObj, + IN IRP *pIrp, + IN void *context ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + + HCA_ENTER( HCA_DBG_PO ); + + UNUSED_PARAM( context ); + + p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension; + pIoStack = IoGetCurrentIrpStackLocation( pIrp ); + + if( !NT_SUCCESS( pIrp->IoStatus.Status ) ) + { + PoStartNextPowerIrp( pIrp ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp ); + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("IRP_MN_SET_POWER for device failed by lower driver with %08x.\n", + pIrp->IoStatus.Status) ); + return STATUS_SUCCESS; + } + + p_ext->PowerState = pIoStack->Parameters.Power.State.DeviceState; + PoSetPowerState( pDevObj, DevicePowerState, + pIoStack->Parameters.Power.State ); + + /* Process in a work item - mthca_start blocks. 
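+ * I/O completion routines can run at DISPATCH_LEVEL, where blocking
+ * calls such as mthca_init_one are illegal, so the actual start is
+ * deferred to a passive-level work item and the completion routine
+ * keeps the IRP pending, roughly:
+ *
+ * IoQueueWorkItem( p_ext->pPoWorkItem, __PowerUpCb,
+ * DelayedWorkQueue, pIrp ); // runs at PASSIVE_LEVEL
+ * return STATUS_MORE_PROCESSING_REQUIRED;
+ *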
*/
+ ASSERT( !p_ext->pPoWorkItem );
+ p_ext->pPoWorkItem = IoAllocateWorkItem( pDevObj );
+ if( !p_ext->pPoWorkItem )
+ {
+ IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );
+
+ PoStartNextPowerIrp( pIrp );
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp );
+
+ return STATUS_SUCCESS;
+ }
+
+ /* Process in work item callback. */
+ IoMarkIrpPending( pIrp );
+ IoQueueWorkItem( p_ext->pPoWorkItem, __PowerUpCb, DelayedWorkQueue, pIrp );
+
+ /*
+ * __PowerUpCb starts the HCA, completes the IRP, calls
+ * PoStartNextPowerIrp and releases the remove lock, so there is
+ * nothing left to do here but keep the IRP pending.
+ */
+ status = STATUS_MORE_PROCESSING_REQUIRED;
+
+ HCA_EXIT( HCA_DBG_PO );
+ return status;
+}
+
+
+/* Work item callback to handle DevicePowerD3 IRPs at passive level. */
+static void
+__PowerDownCb(
+ IN DEVICE_OBJECT* pDevObj,
+ IN void* context )
+{
+ IO_STACK_LOCATION *pIoStack;
+ hca_dev_ext_t *p_ext;
+ IRP *pIrp;
+
+ HCA_ENTER( HCA_DBG_PO );
+
+ p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+ pIrp = (IRP*)context;
+ pIoStack = IoGetCurrentIrpStackLocation( pIrp );
+
+ IoFreeWorkItem( p_ext->pPoWorkItem );
+ p_ext->pPoWorkItem = NULL;
+
+ PoSetPowerState( pDevObj, DevicePowerState,
+ pIoStack->Parameters.Power.State );
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Notify AL that the CA is being removed. */
+ p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+
+ p_ext->state = HCA_STARTED;
+ }
+
+ mthca_remove_one( p_ext );
+
+ IoSkipCurrentIrpStackLocation( pIrp );
+ PoStartNextPowerIrp( pIrp );
+ PoCallDriver( p_ext->cl_ext.p_next_do, pIrp );
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, pIrp );
+
+ HCA_EXIT( HCA_DBG_PO );
+}
+
+
+static NTSTATUS
+hca_set_power(
+ IN DEVICE_OBJECT* const pDevObj,
+ IN IRP* const pIrp,
+ OUT cl_irp_action_t* const pAction )
+{
+ NTSTATUS status;
+ IO_STACK_LOCATION *pIoStack;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PO );
+
+ p_ext = (hca_dev_ext_t*)pDevObj->DeviceExtension;
+ pIoStack = IoGetCurrentIrpStackLocation( pIrp );
+
+ switch( pIoStack->Parameters.Power.Type )
+ {
+ case SystemPowerState:
+ /*
+ * Process on the way up the stack. We cannot block since the
+ * power dispatch function can be called at elevated IRQL if the
+ * device is in a paging/hibernation/crash dump path.
+ */
+ IoMarkIrpPending( pIrp );
+ IoCopyCurrentIrpStackLocationToNext( pIrp );
+#pragma warning( push, 3 )
+ IoSetCompletionRoutine( pIrp, __SystemPowerCompletion, NULL,
+ TRUE, TRUE, TRUE );
+#pragma warning( pop )
+ PoCallDriver( p_ext->cl_ext.p_next_do, pIrp );
+
+ *pAction = IrpDoNothing;
+ status = STATUS_PENDING;
+ break;
+
+ case DevicePowerState:
+ IoMarkIrpPending( pIrp );
+ if( pIoStack->Parameters.Power.State.DeviceState == PowerDeviceD0 )
+ {
+ /* If we're already powered up, just pass down. */
+ if( p_ext->PowerState == PowerDeviceD0 )
+ {
+ status = STATUS_SUCCESS;
+ *pAction = IrpIgnore;
+ break;
+ }
+
+ /* Process in I/O completion callback. */
+ IoCopyCurrentIrpStackLocationToNext( pIrp );
+#pragma warning( push, 3 )
+ IoSetCompletionRoutine( pIrp, __DevicePowerCompletion, NULL,
+ TRUE, TRUE, TRUE );
+#pragma warning( pop )
+ PoCallDriver( p_ext->cl_ext.p_next_do, pIrp );
+ }
+ else
+ {
+ /* Process in a work item - deregister_ca and HcaDeinit block. 
*/ + ASSERT( !p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = IoAllocateWorkItem( pDevObj ); + if( !p_ext->pPoWorkItem ) + { + status = STATUS_INSUFFICIENT_RESOURCES; + break; + } + + /* Process in work item callback. */ + IoMarkIrpPending( pIrp ); + IoQueueWorkItem( + p_ext->pPoWorkItem, __PowerDownCb, DelayedWorkQueue, pIrp ); + } + *pAction = IrpDoNothing; + status = STATUS_PENDING; + break; + + default: + /* Pass down and let the PDO driver handle it. */ + *pAction = IrpIgnore; + status = STATUS_SUCCESS; + break; + } + + if( !NT_SUCCESS( status ) ) + *pAction = IrpComplete; + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_pnp.h b/branches/MTHCA/hw/mthca/kernel/hca_pnp.h new file mode 100644 index 00000000..c23082ed --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_pnp.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_driver.h 46 2005-05-30 17:55:53Z sleybo $ + */ + + +#if !defined( _HCA_PNP_H_ ) +#define _HCA_PNP_H_ + +void hca_init_vfptr( void ); + +NTSTATUS +hca_add_device( + IN PDRIVER_OBJECT pDriverObj, + IN PDEVICE_OBJECT pPdo ); + + +#endif + + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_smp.c b/branches/MTHCA/hw/mthca/kernel/hca_smp.c new file mode 100644 index 00000000..b971888c --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_smp.c @@ -0,0 +1,567 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_smp.c 46 2005-05-30 17:55:53Z sleybo $
+ */
+
+
+/*
+ * SMP handling of IB Access Layer VPD for Mellanox MT23108 HCA
+ */
+
+
+#include "hca_data.h"
+#include "hca_debug.h"
+
+
+boolean_t
+mlnx_cachable_guid_info(
+ IN const mlnx_cache_t* const p_cache,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ uint32_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = cl_ntoh32( p_mad_in->attr_mod );
+
+ /*
+ * TODO: Setup the response to fail the MAD instead of sending
+ * it down to the HCA.
+ */
+ if( idx > 31 )
+ return FALSE;
+
+ if( !p_cache->guid_block[idx].valid )
+ return FALSE;
+
+ /*
+ * If a SET, see if the set is identical to the cache,
+ * in which case it's a no-op that we can answer from the cache.
+ */
+ if( p_mad_in->method == IB_MAD_METHOD_SET )
+ {
+ if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),
+ &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )
+ {
+ /* The set is requesting a change - the HCA must process it. */
+ return FALSE;
+ }
+ }
+ else
+ {
+ CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );
+ }
+
+ /* Setup the response mad. */
+ cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );
+ p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;
+ if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )
+ p_mad_out->status = IB_SMP_DIRECTION;
+ else
+ p_mad_out->status = 0;
+
+ /* Copy the cached data. */
+ cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) );
+
+ return TRUE;
+}
+
+
+boolean_t
+mlnx_cachable_pkey_table(
+ IN const mlnx_cache_t* const p_cache,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ uint16_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = ((uint16_t)cl_ntoh32( p_mad_in->attr_mod ));
+
+ /*
+ * TODO: Setup the response to fail the MAD instead of sending
+ * it down to the HCA.
+ */
+ if( idx > 2047 )
+ return FALSE;
+
+ if( !p_cache->pkey_tbl[idx].valid )
+ return FALSE;
+
+ /*
+ * If a SET, see if the set is identical to the cache,
+ * in which case it's a no-op that we can answer from the cache.
+ */
+ if( p_mad_in->method == IB_MAD_METHOD_SET )
+ {
+ if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),
+ &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) ) )
+ {
+ /* The set is requesting a change - the HCA must process it. */
+ return FALSE;
+ }
+ }
+ else
+ {
+ CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );
+ }
+
+ /* Setup the response mad. */
+ cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );
+ p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;
+ if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )
+ p_mad_out->status = IB_SMP_DIRECTION;
+ else
+ p_mad_out->status = 0;
+
+ /* Copy the cached data. */
+ cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) );
+
+ return TRUE;
+}
+
+
+boolean_t
+mlnx_cachable_sl_vl_table(
+ IN const mlnx_cache_t* const p_cache,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ if( !p_cache->sl_vl.valid )
+ return FALSE;
+
+ /*
+ * If a SET, see if the set is identical to the cache,
+ * in which case it's a no-op that we can answer from the cache.
+ */
+ if( p_mad_in->method == IB_MAD_METHOD_SET )
+ {
+ if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),
+ &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) ) )
+ {
+ /* The set is requesting a change - the HCA must process it. */
+ return FALSE;
+ }
+ }
+ else
+ {
+ CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );
+ }
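+
+ /*
+ * The response construction below is the pattern shared by all of the
+ * mlnx_cachable_* helpers: echo the request, OR in the response bit,
+ * and for direct-routed SMPs set the direction bit in the status,
+ * roughly:
+ *
+ * cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );
+ * p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;
+ * p_mad_out->status = (p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR)
+ * ? IB_SMP_DIRECTION : 0;
+ */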
+
+ /* Setup the response mad. */
+ cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );
+ p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;
+ if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )
+ p_mad_out->status = IB_SMP_DIRECTION;
+ else
+ p_mad_out->status = 0;
+
+ /* Copy the cached data. */
+ cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) );
+
+ return TRUE;
+}
+
+
+boolean_t
+mlnx_cachable_vl_arb_table(
+ IN const mlnx_cache_t* const p_cache,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ uint16_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = ((uint16_t)(cl_ntoh32( p_mad_in->attr_mod ) >> 16)) - 1;
+
+ /*
+ * TODO: Setup the response to fail the MAD instead of sending
+ * it down to the HCA.
+ */
+ if( idx > 3 )
+ return FALSE;
+
+ if( !p_cache->vl_arb[idx].valid )
+ return FALSE;
+
+ /*
+ * If a SET, see if the set is identical to the cache,
+ * in which case it's a no-op that we can answer from the cache.
+ */
+ if( p_mad_in->method == IB_MAD_METHOD_SET )
+ {
+ if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),
+ &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )
+ {
+ /* The set is requesting a change - the HCA must process it. */
+ return FALSE;
+ }
+ }
+ else
+ {
+ CL_ASSERT( p_mad_in->method == IB_MAD_METHOD_GET );
+ }
+
+ /* Setup the response mad. */
+ cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE );
+ p_mad_out->method |= IB_MAD_METHOD_RESP_MASK;
+ if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR )
+ p_mad_out->status = IB_SMP_DIRECTION;
+ else
+ p_mad_out->status = 0;
+
+ /* Copy the cached data. */
+ cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) );
+
+ return TRUE;
+}
+
+
+boolean_t
+mlnx_cachable_port_info(
+ IN const mlnx_cache_t* const p_cache,
+ IN const uint8_t port_num,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ ib_port_info_t *p_port_info;
+
+ UNUSED_PARAM( p_mad_out );
+
+ if( !p_cache->port_info.valid )
+ return FALSE;
+
+ if( p_mad_in->method == IB_MAD_METHOD_GET )
+ return FALSE;
+
+ /*
+ * NOTE: Even though the input MAD is const, we modify it to change
+ * some parameters to no-ops to compensate for problems in the HCA chip.
+ */
+ p_port_info =
+ (ib_port_info_t*)ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );
+
+ /* We can only cache requests for the same port that the SMP came in on. */
+ if( p_mad_in->attr_mod != 0 &&
+ cl_ntoh32( p_mad_in->attr_mod ) != port_num )
+ {
+ return FALSE;
+ }
+
+ /*
+ * To avoid unnecessary glitches in port state, we translate these
+ * fields to NOP when there is no change. Note these fields cannot
+ * change within the hardware without a Set going through here.
+ */
+ if( p_port_info->link_width_enabled ==
+ p_cache->port_info.info.link_width_enabled )
+ {
+ p_port_info->link_width_enabled = 0;
+ }
+ if( (p_port_info->state_info2 & 0x0F) ==
+ (p_cache->port_info.info.state_info2 & 0x0F) )
+ {
+ p_port_info->state_info2 &= 0xF0;
+ }
+ if( (p_port_info->link_speed & 0x0F) ==
+ (p_cache->port_info.info.link_speed & 0x0F) )
+ {
+ p_port_info->link_speed &= 0xF0;
+ }
+ if( (p_port_info->vl_enforce & 0xF0) ==
+ (p_cache->port_info.info.vl_enforce & 0xF0) )
+ {
+ p_port_info->vl_enforce &= 0x0F;
+ }
+
+ /*
+ * We modified the input MAD to change things to no-ops, but
+ * we can't actually fulfill the MAD with cached data. 
+ */
+ return FALSE;
+}
+
+
+boolean_t
+mlnx_cachable_mad(
+ IN const ib_ca_handle_t h_ca,
+ IN const uint8_t port_num,
+ IN const ib_mad_t *p_mad_in,
+ OUT ib_mad_t *p_mad_out )
+{
+ if( p_mad_in->mgmt_class != IB_MCLASS_SUBN_DIR &&
+ p_mad_in->mgmt_class != IB_MCLASS_SUBN_LID )
+ {
+ return FALSE;
+ }
+
+ switch( p_mad_in->attr_id )
+ {
+ case IB_MAD_ATTR_GUID_INFO:
+ return mlnx_cachable_guid_info(
+ &h_ca->cache[port_num-1], p_mad_in, p_mad_out );
+
+ case IB_MAD_ATTR_P_KEY_TABLE:
+ return mlnx_cachable_pkey_table(
+ &h_ca->cache[port_num-1], p_mad_in, p_mad_out );
+
+ case IB_MAD_ATTR_SLVL_TABLE:
+ return mlnx_cachable_sl_vl_table(
+ &h_ca->cache[port_num-1], p_mad_in, p_mad_out );
+
+ case IB_MAD_ATTR_VL_ARBITRATION:
+ return mlnx_cachable_vl_arb_table(
+ &h_ca->cache[port_num-1], p_mad_in, p_mad_out );
+
+ case IB_MAD_ATTR_PORT_INFO:
+ return mlnx_cachable_port_info(
+ &h_ca->cache[port_num-1], port_num, p_mad_in, p_mad_out );
+
+ default:
+ break;
+ }
+ return FALSE;
+}
+
+
+void
+mlnx_update_guid_info(
+ IN mlnx_cache_t* const p_cache,
+ IN const ib_mad_t* const p_mad_out )
+{
+ uint32_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = cl_ntoh32( p_mad_out->attr_mod );
+
+ /*
+ * We only get successful MADs here, so invalid settings
+ * shouldn't happen.
+ */
+ CL_ASSERT( idx <= 31 );
+
+ cl_memcpy( &p_cache->guid_block[idx].tbl,
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ sizeof(ib_guid_info_t) );
+ p_cache->guid_block[idx].valid = TRUE;
+}
+
+
+void
+mlnx_update_pkey_table(
+ IN mlnx_cache_t* const p_cache,
+ IN const ib_mad_t* const p_mad_out )
+{
+ uint16_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = ((uint16_t)cl_ntoh32( p_mad_out->attr_mod ));
+
+ ASSERT( idx <= 2047 );
+
+ cl_memcpy( &p_cache->pkey_tbl[idx].tbl,
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ sizeof(ib_pkey_table_info_t) );
+ p_cache->pkey_tbl[idx].valid = TRUE;
+}
+
+
+void
+mlnx_update_sl_vl_table(
+ IN mlnx_cache_t* const p_cache,
+ IN const ib_mad_t* const p_mad_out )
+{
+ cl_memcpy( &p_cache->sl_vl.tbl,
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ sizeof(ib_slvl_table_t) );
+ p_cache->sl_vl.valid = TRUE;
+}
+
+
+void
+mlnx_update_vl_arb_table(
+ IN mlnx_cache_t* const p_cache,
+ IN const ib_mad_t* const p_mad_out )
+{
+ uint16_t idx;
+
+ /* Get the table selector from the attribute */
+ idx = ((uint16_t)(cl_ntoh32( p_mad_out->attr_mod ) >> 16)) - 1;
+
+ CL_ASSERT( idx <= 3 );
+
+ cl_memcpy( &p_cache->vl_arb[idx].tbl,
+ ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),
+ sizeof(ib_vl_arb_table_t) );
+ p_cache->vl_arb[idx].valid = TRUE;
+}
+
+
+void
+mlnx_update_port_info(
+ IN const mlnx_cache_t* const p_cache,
+ IN const uint8_t port_num,
+ IN const ib_mad_t* const p_mad_out )
+{
+ UNUSED_PARAM( p_cache );
+
+ /* We can only cache requests for the same port that the SMP came in on. */
+ /* TODO: Add synchronization to support getting data from other ports. */
+ if( p_mad_out->attr_mod != 0 &&
+ cl_ntoh32( p_mad_out->attr_mod ) != port_num )
+ {
+ return;
+ }
+
+ /* TODO: Setup the capabilities mask properly. */
+}
+
+
+void
+mlnx_update_cache(
+ IN const ib_ca_handle_t h_ca,
+ IN const uint8_t port_num,
+ IN const ib_mad_t *p_mad_out )
+{
+ if( p_mad_out->mgmt_class != IB_MCLASS_SUBN_DIR &&
+ p_mad_out->mgmt_class != IB_MCLASS_SUBN_LID )
+ {
+ return;
+ }
+
+ /* Only a successful response (status == 0) updates the cache. 
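+ * A nonzero status marks a failed MAD. In mlnx_local_mad the
+ * IB_SMP_DIRECTION bit is ORed into the status only after this update
+ * runs, so a successful direct-routed response still reads as zero
+ * here; a defensive variant (an assumption, not what the code below
+ * does) would mask that bit explicitly:
+ *
+ * if( p_mad_out->status & ~IB_SMP_DIRECTION )
+ * return;
+ *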
*/ + if( p_mad_out->status ) + return; + + + switch( p_mad_out->attr_id ) + { + case IB_MAD_ATTR_GUID_INFO: + mlnx_update_guid_info( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_P_KEY_TABLE: + mlnx_update_pkey_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_SLVL_TABLE: + mlnx_update_sl_vl_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_VL_ARBITRATION: + mlnx_update_vl_arb_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_PORT_INFO: + mlnx_update_port_info( + &h_ca->cache[port_num-1], port_num, p_mad_out ); + break; + + default: + break; + } + +} + + +/* + * Local MAD Support Verbs. For CAs that do not support + * agents in HW. + */ +ib_api_status_t +mlnx_local_mad ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_ca); + UNREFERENCED_PARAMETER(port_num); + UNREFERENCED_PARAMETER(p_mad_in); + UNREFERENCED_PARAMETER(p_mad_out); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_local_mad not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + u_int32_t hca_idx; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (port_num > 2) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!hob_p || E_MARK_CA != hob_p->mark) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_idx = hob_p->index; + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) ) + { + if( HH_OK != THH_hob_process_local_mad( hobul_p->hh_hndl, port_num, + 0x0, 0, (void *)p_mad_in, p_mad_out ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x", + p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ) ); + status = IB_ERROR; + goto cleanup; + } + + mlnx_update_cache( h_ca, port_num, p_mad_out ); + } + + /* Modify direction for Direct MAD */ + if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status |= IB_SMP_DIRECTION; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_verbs.c b/branches/MTHCA/hw/mthca/kernel/hca_verbs.c new file mode 100644 index 00000000..350018c4 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_verbs.c @@ -0,0 +1,3060 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: hca_verbs.c 148 2005-07-12 07:48:46Z sleybo $ + */ + + +#include "hca_driver.h" +#include "mthca_dev.h" +#include "ib_cache.h" + + +#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1)) + + +/* Matches definition in IbAccess for MaxSMPsWatermark */ +uint32_t g_sqp_max_avs = ((4096/sizeof(ib_mad_t))*32*5); + + +// Local declarations +ib_api_status_t +mlnx_query_qp ( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t *p_qp_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* CA Access Verbs +*/ +ib_api_status_t +mlnx_open_ca ( + IN const ib_net64_t ca_guid, // IN const char * ca_name, + IN const ci_completion_cb_t pfn_completion_cb, + IN const ci_async_event_cb_t pfn_async_event_cb, + IN const void*const ca_context, + OUT ib_ca_handle_t *ph_ca) +{ +#ifndef WIN_TO_BE_CHANGED + + mlnx_hca_t *p_hca; + ib_api_status_t status; + + HCA_ENTER(MLNX_DBG_TRACE); + HCA_TRACE(MLNX_DBG_INFO, ("context 0x%p\n", ca_context)); + + // find CA object + p_hca = mlnx_hca_from_guid( ca_guid ); + if( !p_hca ) { + HCA_EXIT( MLNX_DBG_TRACE ); + return IB_NOT_FOUND; + } + + HCA_TRACE(MLNX_DBG_INFO, ("context 0x%p\n", ca_context)); + status = mlnx_hobs_set_cb(&p_hca->hob, + pfn_completion_cb, + pfn_async_event_cb, + ca_context); + if (IB_SUCCESS != status) { + goto err_set_cb; + } + + //TODO: do we need something for kernel users ? + + // Return pointer to HOB object + if (ph_ca) *ph_ca = &p_hca->hob; + status = IB_SUCCESS; + +err_set_cb: + + // For user mode call - return status to user mode + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + mlnx_hca_t *p_hca; + HH_hca_dev_t * hca_ul_info; + void * hca_ul_resources_p = NULL; // (THH_hca_ul_resources_t *) + ib_api_status_t status; + mlnx_hob_t *new_ca = NULL; + MOSAL_protection_ctx_t prot_ctx; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context)); + + // find CA object + p_hca = mlnx_hca_from_guid( ca_guid ); + if( !p_hca ) { + HCA_EXIT( MLNX_DBG_TRACE ); + return IB_NOT_FOUND; + } + + hca_ul_info = p_hca->hh_hndl; + + { + // We are opening the HCA in kernel mode. + // if a HOBKL exists for this device (i.e. 
it is open) - return E_BUSY
+ if (IB_SUCCESS == mlnx_hobs_lookup(p_hca->hh_hndl, &new_ca)) {
+ if (ph_ca) *ph_ca = (ib_ca_handle_t)new_ca;
+ CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+ return IB_RESOURCE_BUSY;
+ }
+
+ // Create a mapping from hca index to hh_hndl
+ status = mlnx_hobs_insert(p_hca, &new_ca);
+ if (IB_SUCCESS != status) {
+ CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+ return status;
+ }
+
+ /* save copy of HCA device object */
+ new_ca->p_dev_obj = p_hca->p_dev_obj;
+
+ // Initialize the device driver
+ if (HH_OK != THH_hob_open_hca(p_hca->hh_hndl, NULL, NULL)) {
+ status = IB_ERROR;
+ goto cleanup;
+ }
+
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context));
+ status = mlnx_hobs_set_cb(new_ca,
+ pfn_completion_cb,
+ pfn_async_event_cb,
+ ca_context);
+ if (IB_SUCCESS != status) {
+ goto cleanup;
+ }
+
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ul_resource sizes: hca %d pd %d\n",
+ hca_ul_info->hca_ul_resources_sz,
+ hca_ul_info->pd_ul_resources_sz));
+
+ hca_ul_resources_p = cl_zalloc( hca_ul_info->hca_ul_resources_sz);
+
+ /* get the kernel protection context */
+ prot_ctx = MOSAL_get_kernel_prot_ctx();
+ }
+
+ if (!hca_ul_resources_p) {
+ status = IB_INSUFFICIENT_MEMORY;
+ goto cleanup;
+ }
+
+ if (HH_OK != THH_hob_alloc_ul_res(p_hca->hh_hndl, prot_ctx, hca_ul_resources_p)) {
+ status = IB_ERROR;
+ goto cleanup;
+ }
+
+ // TBD: !!! in user mode (kernel hobul_idx != hob_idx)
+ status = mlnx_hobul_new(new_ca, p_hca->hh_hndl, hca_ul_resources_p);
+ if (IB_SUCCESS != status) {
+ goto cleanup;
+ }
+
+ // Return the HOBUL index
+ if (ph_ca) *ph_ca = new_ca;
+
+ CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+ return IB_SUCCESS;
+
+cleanup:
+ if (hca_ul_resources_p)
+ cl_free( hca_ul_resources_p);
+ THH_hob_close_hca(p_hca->hh_hndl);
+ mlnx_hobs_remove(new_ca);
+
+ // For user mode call - return status to user mode
+ CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+ CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+ return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_query_ca (
+ IN const ib_ca_handle_t h_ca,
+ OUT ib_ca_attr_t *p_ca_attr,
+ IN OUT uint32_t *p_byte_count,
+ IN OUT ci_umv_buf_t *p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+ ib_api_status_t status;
+ uint32_t size, required_size;
+ u_int8_t port_num, num_ports;
+ u_int32_t num_gids, num_pkeys;
+ u_int32_t num_page_sizes = 1; // TBD: what is actually supported
+ uint8_t *last_p;
+ u_int32_t priv_op;
+ struct ib_device_attr props;
+ struct ib_port_attr *hca_ports = NULL;
+ int i;
+
+ mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;
+ struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
+ int err;
+
+ HCA_ENTER(MLNX_DBG_TRACE);
+
+ // sanity checks
+ if( p_umv_buf && p_umv_buf->command ) {
+ HCA_TRACE (CL_DBG_ERROR, ("User mode is not supported yet\n"));
+ status = IB_UNSUPPORTED;
+ goto err_user_unsupported;
+ }
+ if (NULL == p_byte_count) {
+ status = IB_INVALID_PARAMETER;
+ goto err_byte_count;
+ }
+
+ // query the device
+ err = mthca_query_device(ib_dev, &props );
+ if (err) {
+ HCA_TRACE (CL_DBG_ERROR,
+ ("mthca_query_device failed (%d)\n", err));
+ status = errno_to_iberr(err);
+ goto err_query_device;
+ }
+
+ // allocate array for port properties
+ num_ports = ib_dev->phys_port_cnt; /* Number of physical ports of the HCA */
+ if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
+ HCA_TRACE (CL_DBG_ERROR, ("Failed to cl_zalloc ports array\n"));
+ status = IB_INSUFFICIENT_MEMORY;
+ goto err_alloc_ports;
+ }
+
+ // start calculating the full size of ib_ca_attr_t
+ num_gids = 0;
+ num_pkeys = 0;
+ required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
+ PTR_ALIGN(sizeof(u_int32_t) * num_page_sizes) +
+ PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);
+
+ // get port properties
+ for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
+ // request
+ err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
+ if (err) {
+ HCA_TRACE (CL_DBG_ERROR, ("mthca_query_port failed(%d) for port %d\n",err, port_num));
+ status = errno_to_iberr(err);
+ goto err_query_port;
+ }
+
+ // calculate GID table size
+ num_gids = hca_ports[port_num].gid_tbl_len;
+ size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids);
+ required_size += size;
+
+ // calculate pkeys table size
+ num_pkeys = hca_ports[port_num].pkey_tbl_len;
+ size = PTR_ALIGN(sizeof(u_int16_t) * num_pkeys);
+ required_size += size;
+ }
+
+#ifdef WIN_USER_SUPPORT
+ // handling user parameters
+ if( p_umv_buf && p_umv_buf->command )
+ {
+ /*
+ * Prepare the buffer with the size including hca_ul_resources_sz
+ * NO ALIGNMENT for this size
+ */
+
+ if (p_umv_buf->p_inout_buf)
+ {
+ cl_memcpy (&priv_op, p_umv_buf->p_inout_buf, sizeof (priv_op));
+ HCA_TRACE(MLNX_DBG_TRACE, ("priv_op = %d\n", priv_op));
+
+ /*
+ * Yes, UVP request for hca_ul_info
+ */
+ if (p_umv_buf->input_size !=
+ (sizeof (HH_hca_dev_t) + sizeof (priv_op) ))
+ {
+ *p_byte_count = required_size;
+ p_umv_buf->output_size = 0;
+ status = IB_INVALID_PARAMETER;
+ goto cleanup;
+ }
+ cl_memcpy( (uint8_t* __ptr64)p_umv_buf->p_inout_buf + sizeof (priv_op),
+ hca_ul_info, sizeof (HH_hca_dev_t));
+ p_umv_buf->output_size = p_umv_buf->input_size;
+ }
+ }
+#endif
+
+ // resource sufficiency check
+ if (NULL == p_ca_attr || *p_byte_count < required_size) {
+ *p_byte_count = required_size;
+ status = IB_INSUFFICIENT_MEMORY;
+ if ( p_ca_attr != NULL) {
+ HCA_TRACE (CL_DBG_ERROR,
+ ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
+ }
+ goto err_insuff_mem;
+ }
+
+ // Space is sufficient - setup table pointers
+ last_p = (uint8_t*)p_ca_attr;
+ last_p += PTR_ALIGN(sizeof(*p_ca_attr));
+
+ p_ca_attr->p_page_size = (uint32_t*)last_p;
+ last_p += PTR_ALIGN(num_page_sizes * sizeof(u_int32_t));
+
+ p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
+ last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));
+
+ for (port_num = 0; port_num < num_ports; port_num++) {
+ p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
+ size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
+ last_p += size;
+
+ p_ca_attr->p_port_attr[port_num].p_pkey_table = (u_int16_t *)last_p;
+ size = PTR_ALIGN(sizeof(u_int16_t) * hca_ports[port_num].pkey_tbl_len);
+ last_p += size;
+ }
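+
+ /*
+ * A worked example of the layout logic above (sizes are illustrative;
+ * the exact values depend on the headers): PTR_ALIGN rounds up to a
+ * multiple of sizeof(void*), i.e. 8 on x64, so for instance:
+ *
+ * PTR_ALIGN(sizeof(u_int16_t) * 3) == PTR_ALIGN(6) == 8
+ * PTR_ALIGN(sizeof(ib_gid_t) * 32) == PTR_ALIGN(512) == 512
+ *
+ * Because the same PTR_ALIGN terms are summed into required_size and
+ * then walked with last_p, the two always match, as the CL_ASSERT
+ * below verifies.
+ */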
+
+ // Separate the loops to ensure that table pointers are always setup
+ for (port_num = 0; port_num < num_ports; port_num++) {
+
+ // get pkeys, using cache
+ for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
+ err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
+ &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
+ if (err) {
+ status = errno_to_iberr(err);
+ HCA_TRACE (CL_DBG_ERROR,
+ ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
+ err, port_num + start_port(ib_dev), i));
+ goto err_get_pkey;
+ }
+ }
+
+ // get gids, using cache
+ for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
+ union ib_gid * __ptr64 gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
+ err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, gid );
+ //TODO: do we need to convert gids to little endian ?
+ if (err) {
+ status = errno_to_iberr(err);
+ HCA_TRACE (CL_DBG_ERROR,
+ ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
+ err, port_num + start_port(ib_dev), i));
+ goto err_get_gid;
+ }
+ }
+
+#if 0
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d gid0:", port_num));
+ for (i = 0; i < 16; i++)
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("\n"));
+#endif
+ }
+
+ // set result size
+ p_ca_attr->size = required_size;
+ CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
+ #if 0
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Space required %d used %d\n",
+ required_size, ((uintn_t)last_p) - ((uintn_t)p_ca_attr)));
+ #endif
+
+#ifdef WIN_USER_SUPPORT
+ // Convert query result into IBAL structure (no cl_memset())
+ if( p_umv_buf && p_umv_buf->command )
+ {
+ }
+#endif
+
+ // !!! GID/PKEY tables must be queried before this call !!!
+ mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);
+
+ status = IB_SUCCESS;
+
+err_get_gid:
+err_get_pkey:
+err_insuff_mem:
+err_query_port:
+ cl_free(hca_ports);
+err_alloc_ports:
+err_query_device:
+err_byte_count:
+ if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = status;
+err_user_unsupported:
+ if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
+ HCA_TRACE(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+ HCA_EXIT(MLNX_DBG_TRACE);
+ return status;
+
+
+#else
+ ib_api_status_t status;
+
+ mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;
+ HH_hca_hndl_t hh_hndl = NULL;
+ HH_hca_dev_t *hca_ul_info;
+ VAPI_hca_cap_t hca_cap;
+ VAPI_hca_port_t *hca_ports = NULL;
+ uint32_t size, required_size;
+ u_int8_t port_num, num_ports;
+ u_int32_t num_gids, num_pkeys;
+ u_int32_t num_page_sizes = 1; // TBD: what is actually supported
+ uint8_t *last_p;
+ void *hca_ul_resources_p = NULL;
+ u_int32_t priv_op;
+
+ CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+ if (NULL == p_byte_count) {
+ status = IB_INVALID_PARAMETER;
+ goto cleanup;
+ }
+
+ mlnx_hobs_get_handle(hob_p, &hh_hndl);
+ if (NULL == hh_hndl) {
+ CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("returning E_NODEV dev\n"));
+ status = IB_INVALID_CA_HANDLE;
+ goto cleanup;
+ }
+
+ hca_ul_info = (HH_hca_dev_t *)hh_hndl;
+
+ if (HH_OK != THH_hob_query(hh_hndl, &hca_cap)) {
+ status = IB_ERROR;
+ goto cleanup;
+ }
+
+ num_ports = hca_cap.phys_port_num; /* Number of physical ports of the HCA */
+
+ if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof(VAPI_hca_port_t)))) {
+ CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
+ ("Failed to cl_zalloc ports array\n"));
+ status = IB_INSUFFICIENT_MEMORY;
+ goto cleanup;
+ }
+
+ // Loop on ports and get their properties
+ num_gids = 0;
+ num_pkeys = 0;
+ required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
+ PTR_ALIGN(sizeof(u_int32_t) * num_page_sizes) +
+ PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);
+ for (port_num = 0; port_num < num_ports; port_num++) {
+ if (HH_OK != THH_hob_query_port_prop(hh_hndl, port_num+1, &hca_ports[port_num])) {
+ status = IB_ERROR;
+ goto cleanup;
+ }
+
+ num_gids = hca_ports[port_num].gid_tbl_len;
+ size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids);
+ required_size += size;
+
+ num_pkeys = hca_ports[port_num].pkey_tbl_len;
+ size = PTR_ALIGN(sizeof(u_int16_t) * num_pkeys);
+ required_size += size;
+ }
+
+ if( p_umv_buf && p_umv_buf->command )
+ {
+ /*
+ * Prepare the buffer with the size including 
hca_ul_resources_sz + * NO ALIGNMENT for this size + */ + + if (p_umv_buf->p_inout_buf) + { + cl_memcpy (&priv_op, p_umv_buf->p_inout_buf, sizeof (priv_op)); + CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("priv_op = %d\n", priv_op)); + + /* + * Yes, UVP request for hca_ul_info + */ + if (p_umv_buf->input_size != + (sizeof (HH_hca_dev_t) + sizeof (priv_op) )) + { + *p_byte_count = required_size; + p_umv_buf->output_size = 0; + status = IB_INVALID_PARAMETER; + goto cleanup; + } + cl_memcpy( (uint8_t* __ptr64)p_umv_buf->p_inout_buf + sizeof (priv_op), + hca_ul_info, sizeof (HH_hca_dev_t)); + p_umv_buf->output_size = p_umv_buf->input_size; + } + } + + if (NULL == p_ca_attr || *p_byte_count < required_size) { + *p_byte_count = required_size; + status = IB_INSUFFICIENT_MEMORY; + if ( p_ca_attr != NULL) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed *p_byte_count < required_size\n")); + } + goto cleanup; + } + + // Space is sufficient - setup table pointers + last_p = (uint8_t*)p_ca_attr; + last_p += PTR_ALIGN(sizeof(*p_ca_attr)); + + p_ca_attr->p_page_size = (uint32_t*)last_p; + last_p += PTR_ALIGN(num_page_sizes * sizeof(u_int32_t)); + + p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p; + last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t)); + + for (port_num = 0; port_num < num_ports; port_num++) { + p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p; + size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len); + last_p += size; + + p_ca_attr->p_port_attr[port_num].p_pkey_table = (u_int16_t *)last_p; + size = PTR_ALIGN(sizeof(u_int16_t) * hca_ports[port_num].pkey_tbl_len); + last_p += size; + } + + // Separate the loops to ensure that table pointers are always setup + for (port_num = 0; port_num < num_ports; port_num++) { + status = mlnx_get_hca_pkey_tbl(hh_hndl, port_num+1, + hca_ports[port_num].pkey_tbl_len, + p_ca_attr->p_port_attr[port_num].p_pkey_table); + if (IB_SUCCESS != status) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed to mlnx_get_hca_pkey_tbl for port_num:%d\n",port_num)); + goto cleanup; + } + + status = mlnx_get_hca_gid_tbl(hh_hndl, port_num+1, + hca_ports[port_num].gid_tbl_len, + &p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw); + if (IB_SUCCESS != status) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed to mlnx_get_hca_gid_tbl for port_num:%d\n",port_num)); + goto cleanup; + } + +#if 0 + { + int i; + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d gid0:", port_num)); + for (i = 0; i < 16; i++) + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i])); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("\n")); + } +#endif + } + + // Convert query result into IBAL structure (no cl_memset()) + if( p_umv_buf && p_umv_buf->command ) + { + // p_ca_attr->size = required_size - hca_ul_info->hca_ul_resources_sz; + p_ca_attr->size = required_size; + } + else + { + p_ca_attr->size = required_size; + } + + // !!! GID/PKEY tables must be queried before this call !!! 
+ mlnx_conv_vapi_hca_cap(hca_ul_info, &hca_cap, hca_ports, p_ca_attr); + + // verify: required space == used space + CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) ); + +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Space required %d used %d\n", + required_size, + ((uintn_t)last_p) - ((uintn_t)p_ca_attr)))); +#endif + + if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = IB_SUCCESS; + if (hca_ul_resources_p) cl_free (hca_ul_resources_p); + if (hca_ports) cl_free( hca_ports ); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = status; + if (hca_ul_resources_p) cl_free (hca_ul_resources_p); + if (hca_ports) cl_free( hca_ports); + if( p_ca_attr != NULL || status != IB_INSUFFICIENT_MEMORY ) + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_modify_ca ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t modca_cmd, + IN const ib_port_attr_mod_t *p_port_attr) +{ +#ifndef WIN_TO_BE_CHANGED + +#define SET_CAP_MOD(al_mask, al_fld, ib) \ + if (modca_cmd & al_mask) { \ + if (p_port_attr->cap.##al_fld) \ + props.set_port_cap_mask |= ib; \ + else \ + props.clr_port_cap_mask |= ib; \ + } + + ib_api_status_t status; + int err; + struct ib_port_modify props; + struct ib_port_attr hca_port; + int port_modify_mask = 0; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p ); + + HCA_ENTER(MLNX_DBG_TRACE); + + // prepare parameters + RtlZeroMemory(&props, sizeof(props)); + SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM); + SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP); + if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) + port_modify_mask |= IB_PORT_RESET_QKEY_CNTR; + + // modify port + err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props ); + if (err) { + status = errno_to_iberr(err); + HCA_TRACE (CL_DBG_ERROR, ("mthca_modify_port failed (%d) \n", err)); + goto err_modify_port; + } + + status = IB_SUCCESS; + +err_modify_port: + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_hndl_t hh_hndl = NULL; + + VAPI_hca_attr_t hca_attr; + VAPI_hca_attr_mask_t hca_attr_mask = 0; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + mlnx_hobs_get_handle(hob_p, &hh_hndl); + if (NULL == hh_hndl) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + cl_memclr(&hca_attr, sizeof(hca_attr)); + if (modca_cmd & IB_CA_MOD_IS_SM) { + hca_attr_mask |= HCA_ATTR_IS_SM; + hca_attr.is_sm = (MT_bool)p_port_attr->cap.sm; + } + if (modca_cmd & IB_CA_MOD_IS_SNMP_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_SNMP_TUN_SUP; + hca_attr.is_snmp_tun_sup = (MT_bool)p_port_attr->cap.snmp; + } + if (modca_cmd & IB_CA_MOD_IS_DEV_MGMT_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_DEV_MGT_SUP; + hca_attr.is_dev_mgt_sup = (MT_bool)p_port_attr->cap.dev_mgmt; + } + if (modca_cmd & IB_CA_MOD_IS_VEND_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_VENDOR_CLS_SUP; + hca_attr.is_vendor_cls_sup = (MT_bool)p_port_attr->cap.vend; + } + if (modca_cmd 
& IB_CA_MOD_QKEY_CTR) { + if (p_port_attr->qkey_ctr == 0) + hca_attr.reset_qkey_counter = TRUE; + } + + if (0 != hca_attr_mask) { + if (HH_OK != THH_hob_modify( hh_hndl, port_num, &hca_attr, &hca_attr_mask)) + { + status = IB_ERROR; + goto cleanup; + } + } + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_close_ca ( + IN ib_ca_handle_t h_ca) +{ +#ifndef WIN_TO_BE_CHANGED + + HCA_ENTER(MLNX_DBG_TRACE); + + // release HOB resources + mlnx_hobs_remove(h_ca); + + //TODO: release HOBUL resources + + HCA_EXIT(MLNX_DBG_TRACE); + + return IB_SUCCESS; + +#else + ib_api_status_t status; + + HH_hca_hndl_t hh_hndl = NULL; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_dev_t *hca_ul_info; + void *hca_ul_resources_p = NULL; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + hobul_p = mlnx_hobul_array[hob_p->index]; + if( !hobul_p ) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + if( hobul_p->count ) { + status = IB_RESOURCE_BUSY; + goto cleanup; + } + + mlnx_hobs_get_handle(hob_p, &hh_hndl); + if (NULL == hh_hndl) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hh_hndl; + mlnx_hobul_get(hob_p, &hca_ul_resources_p); + + if (hca_ul_resources_p) { + THH_hob_free_ul_res(hh_hndl, hca_ul_resources_p); + cl_free( hca_ul_resources_p); + } + mlnx_hobul_delete(hob_p); + THH_hob_close_hca(hh_hndl); + mlnx_hobs_remove(hob_p); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + + +static ib_api_status_t +mlnx_um_open( + IN const ib_ca_handle_t h_ca, + IN OUT ci_umv_buf_t* const p_umv_buf, + OUT ib_ca_handle_t* const ph_um_ca ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_ca); + UNREFERENCED_PARAMETER(p_umv_buf); + UNREFERENCED_PARAMETER(ph_um_ca); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_um_open not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_hndl_t hh_hndl = NULL; + HH_hca_dev_t *hca_ul_info; + mlnx_um_ca_t *p_um_ca; + MOSAL_protection_ctx_t prot_ctx; + + HCA_ENTER( MLNX_DBG_TRACE ); + + mlnx_hobs_get_handle( hob_p, &hh_hndl ); + if( !hh_hndl ) + { + HCA_TRACE(MLNX_DBG_INFO, ("returning E_NODEV dev\n")); + status = IB_INVALID_CA_HANDLE; + goto mlnx_um_open_err1; + } + + hca_ul_info = (HH_hca_dev_t *)hh_hndl; + + if( !p_umv_buf->command ) + { + p_umv_buf->status = IB_SUCCESS; + goto mlnx_um_open_err1; + } + + /* + * Prepare the buffer with the size including hca_ul_resources_sz + * NO ALIGNMENT for this size + */ + if( !p_umv_buf->p_inout_buf || + p_umv_buf->output_size < sizeof(void*) ) + { + p_umv_buf->status = IB_INVALID_PARAMETER; + goto mlnx_um_open_err1; + } + + HCA_TRACE( MLNX_DBG_TRACE, ("priv_op = %d\n", p_umv_buf->command )); + + /* Yes, UVP request for hca_ul_info. 
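+ * The allocation below is followed by the classic kernel-to-user
+ * mapping sequence - IoAllocateMdl, MmBuildMdlForNonPagedPool, then
+ * MmMapLockedPagesSpecifyCache inside __try/__except, since mapping
+ * into a user address space can raise an exception rather than
+ * return NULL. Roughly:
+ *
+ * p_mdl = IoAllocateMdl( buf, len, FALSE, TRUE, NULL );
+ * MmBuildMdlForNonPagedPool( p_mdl );
+ * __try {
+ * p_user = MmMapLockedPagesSpecifyCache( p_mdl, UserMode,
+ * MmCached, NULL, FALSE, NormalPagePriority );
+ * } __except( EXCEPTION_EXECUTE_HANDLER ) { p_user = NULL; }
+ *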
*/
+	p_um_ca = (mlnx_um_ca_t*)cl_zalloc(
+		sizeof(mlnx_um_ca_t) + hca_ul_info->hca_ul_resources_sz - 1 );
+	if( !p_um_ca )
+	{
+		p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
+		goto mlnx_um_open_err1;
+	}
+
+	p_um_ca->p_mdl = IoAllocateMdl( &p_um_ca->dev_info,
+		(ULONG)(sizeof(HH_hca_dev_t) + hca_ul_info->hca_ul_resources_sz),
+		FALSE, TRUE, NULL );
+	if( !p_um_ca->p_mdl )
+	{
+		p_umv_buf->status = IB_ERROR;
+		goto mlnx_um_open_err2;
+	}
+	/* Build the page list... */
+	MmBuildMdlForNonPagedPool( p_um_ca->p_mdl );
+
+	/* Map the memory into the calling process's address space. */
+	__try
+	{
+		p_um_ca->p_mapped_addr =
+			MmMapLockedPagesSpecifyCache( p_um_ca->p_mdl,
+			UserMode, MmCached, NULL, FALSE, NormalPagePriority );
+	}
+	__except(EXCEPTION_EXECUTE_HANDLER)
+	{
+		p_umv_buf->status = IB_ERROR;
+		goto mlnx_um_open_err3;
+	}
+
+	/* Register with THH (attach to the HCA). */
+	prot_ctx = MOSAL_get_current_prot_ctx();
+	if( THH_hob_alloc_ul_res(hh_hndl, prot_ctx, p_um_ca->ul_hca_res) != HH_OK )
+	{
+		HCA_TRACE( CL_DBG_ERROR, ("Failed to get ul_res\n"));
+		p_umv_buf->status = IB_ERROR;
+	}
+
+	if( p_umv_buf->status == IB_SUCCESS )
+	{
+		/* Copy the dev info. */
+		p_um_ca->dev_info = *hca_ul_info;
+		*ph_um_ca = (ib_ca_handle_t)p_um_ca;
+		(*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	else
+	{
+		MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
+mlnx_um_open_err3:
+		IoFreeMdl( p_um_ca->p_mdl );
+mlnx_um_open_err2:
+		cl_free( p_um_ca );
+mlnx_um_open_err1:
+		*ph_um_ca = NULL;
+	}
+
+	//*ph_um_ca = NULL;
+	p_umv_buf->output_size = sizeof(void*);
+	HCA_EXIT( MLNX_DBG_TRACE );
+	return p_umv_buf->status;
+#endif
+}
+
+
+static void
+mlnx_um_close(
+	IN				ib_ca_handle_t				h_ca,
+	IN				ib_ca_handle_t				h_um_ca )
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_ca);
+	UNREFERENCED_PARAMETER(h_um_ca);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_um_close not implemented\n"));
+	return ;
+#else
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;
+	HH_hca_hndl_t hh_hndl = NULL;
+	mlnx_um_ca_t *p_um_ca = (mlnx_um_ca_t*)h_um_ca;
+
+	HCA_ENTER( MLNX_DBG_TRACE );
+
+	mlnx_hobs_get_handle( hob_p, &hh_hndl );
+	if( !hh_hndl )
+		goto mlnx_um_close_cleanup;
+
+	if( !p_um_ca )
+		return;
+
+	THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );
+
+mlnx_um_close_cleanup:
+	MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
+	IoFreeMdl( p_um_ca->p_mdl );
+	cl_free( p_um_ca );
+
+	HCA_EXIT( MLNX_DBG_TRACE );
+#endif
+}
+
+
+/*
+* Protection Domain and Reliable Datagram Domain Verbs
+*/
+
+ib_api_status_t
+mlnx_allocate_pd (
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	ib_pd_type_t				type,
+		OUT			ib_pd_handle_t				*ph_pd,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+	ib_api_status_t		status;
+	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
+	struct ib_device	*ib_dev = IBDEV_FROM_HOB( hob_p );
+	struct ib_ucontext *context_p = NULL;
+	struct ib_udata *udata_p = NULL;
+	struct ib_pd *ib_pd_p;
+	int err;
+
+	//TODO: how do we use it?
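+	// (For reference: the VAPI path below uses 'type' to pre-allocate AVs
+	// for IB_PDT_SQP and IB_PDT_UD domains; the ib_alloc_pd() path has no
+	// equivalent yet, so the parameter is deliberately unused for now.)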
+ UNREFERENCED_PARAMETER(type); + + HCA_ENTER(MLNX_DBG_TRACE); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_TRACE (CL_DBG_ERROR, ("User mode is not supported yet\n")); + status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + +#ifdef WIN_USER_SUPPORT + if( p_umv_buf && p_umv_buf->command ) + { + //TODO: check the below sanity check + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->pd_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + //TODO: create user context by call to mthca_alloc_ucontext() + } +#endif + + // create PD + if( p_umv_buf && p_umv_buf->command ) { + //TODO: call uverbs + } + else { + ib_pd_p = ib_alloc_pd(ib_dev); + if (IS_ERR(ib_pd_p)) { + err = PTR_ERR(ib_pd_p); + HCA_TRACE (CL_DBG_ERROR, ("ib_alloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_pd; + } + } + // return the result + if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p; + +#ifdef WIN_USER_SUPPORT + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + /* + * Copy the pd_idx back to user + */ + cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz), + &pd_idx, sizeof (pd_idx)); + p_umv_buf->status = IB_SUCCESS; + } +#endif + + status = IB_SUCCESS; + +err_alloc_pd: +err_user_unsupported: + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HHUL_pd_hndl_t hhul_pd_hndl = 0; + void *pd_ul_resources_p = NULL; + u_int32_t pd_idx; + ib_api_status_t status; + MOSAL_protection_ctx_t prot_ctx; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + hobul_p = mlnx_hobs_get_hobul(hob_p); + if (NULL == hobul_p) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->pd_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + /* get the current protection context */ + prot_ctx = MOSAL_get_current_prot_ctx(); + } + else + { + // for kernel mode calls - allocate app resources. 
Use prep->call->done sequence + pd_ul_resources_p = cl_zalloc( hca_ul_info->pd_ul_resources_sz); + if (NULL == pd_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + switch( type ) + { + case IB_PDT_SQP: + if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl, + g_sqp_max_avs, PD_FOR_SQP, &hhul_pd_hndl, pd_ul_resources_p)) + { + status = IB_ERROR; + goto cleanup; + } + break; + + case IB_PDT_UD: + if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl, + g_sqp_max_avs, PD_NO_FLAGS, &hhul_pd_hndl, pd_ul_resources_p)) + { + status = IB_ERROR; + goto cleanup; + } + break; + + default: + if (HH_OK != THHUL_pdm_alloc_pd_prep(hobul_p->hhul_hndl, &hhul_pd_hndl, pd_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + } + /* get the current protection context */ + prot_ctx = MOSAL_get_kernel_prot_ctx(); + } + + // Allocate the PD (cmdif) + if (HH_OK != THH_hob_alloc_pd(hobul_p->hh_hndl, prot_ctx, pd_ul_resources_p, &pd_idx)) { + status = IB_INSUFFICIENT_RESOURCES; + goto cleanup_pd; + } + + if( !(p_umv_buf && p_umv_buf->command) ) + { + // Manage user level resources + if (HH_OK != THHUL_pdm_alloc_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl, pd_idx, pd_ul_resources_p)) { + THH_hob_free_pd(hobul_p->hh_hndl, pd_idx); + status = IB_ERROR; + goto cleanup_pd; + } + } + + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_pd); + + // Save data refs for future use + cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex); + hobul_p->pd_info_tbl[pd_idx].pd_num = pd_idx; + hobul_p->pd_info_tbl[pd_idx].hca_idx = hob_p->index; + hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl = hhul_pd_hndl; + hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = pd_ul_resources_p; + hobul_p->pd_info_tbl[pd_idx].count = 0; + hobul_p->pd_info_tbl[pd_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command); + hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_PD; + cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex); + + cl_atomic_inc( &hobul_p->count ); + + if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca_idx 0x%x pd_idx 0x%x returned 0x%p\n", hob_p->index, pd_idx, *ph_pd)); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + /* + * Copy the pd_idx back to user + */ + cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz), + &pd_idx, sizeof (pd_idx)); + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_pd: + THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE); + THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl); + +cleanup: + if( !(p_umv_buf && p_umv_buf->command) && pd_ul_resources_p ) + cl_free( pd_ul_resources_p); + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_deallocate_pd ( + IN ib_pd_handle_t h_pd) +{ +#ifndef WIN_TO_BE_CHANGED + + ib_api_status_t status; + int err; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + + // dealloc pd + if( ib_pd_p->uobject ) { + //TODO: call uverbs + } + else { + err = ib_dealloc_pd( ib_pd_p ); + if (err) { + HCA_TRACE (CL_DBG_ERROR, ("ib_dealloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_dealloc_pd; + } + } + status = IB_SUCCESS; + +err_dealloc_pd: + 
HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + HHUL_pd_hndl_t hhul_pd_hndl; + ib_api_status_t status; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex); + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d k_mod %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count, hobul_p->pd_info_tbl[pd_idx].kernel_mode)); + + if (0 != hobul_p->pd_info_tbl[pd_idx].count) { + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count)); + status = IB_RESOURCE_BUSY; + goto cleanup_locked; + } + + hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl; + + // PREP: + if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) { + if (HH_OK != THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE)) { + status = IB_ERROR; + goto cleanup_locked; + } + } + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d before free_pd hh_hndl %p\n", + pd_idx, hobul_p->hh_hndl)); + + if (HH_OK != THH_hob_free_pd(hobul_p->hh_hndl, pd_idx)) { + status = IB_ERROR; + goto cleanup_locked; + } + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d after free_pd\n", pd_idx)); + + if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) { + if (HH_OK != THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl)) { + status = IB_ERROR; + goto cleanup_locked; + } + if (hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p) + cl_free( hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p); + } + + hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_INVALID; + hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = NULL; + + cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex); + + cl_atomic_dec( &hobul_p->count ); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex); + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +/* +* Address Vector Management Verbs +*/ +ib_api_status_t +mlnx_create_av ( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t *p_addr_vector, + OUT ib_av_handle_t *ph_av, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(p_addr_vector); + UNREFERENCED_PARAMETER(ph_av); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_create_av not implemented\n")); + return IB_INVALID_CA_HANDLE; +#else + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + HHUL_ud_av_hndl_t av_h; + mlnx_hobul_t *hobul_p; + mlnx_avo_t *avo_p = NULL; + HHUL_pd_hndl_t hhul_pd_hndl; + ib_api_status_t status; + + VAPI_ud_av_t av; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; 
+	}
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
+
+	if (NULL == (avo_p = cl_zalloc( sizeof(mlnx_avo_t)))) {
+		status = IB_INSUFFICIENT_MEMORY;
+		goto cleanup;
+	}
+
+	cl_memclr(&av, sizeof(av));
+	mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);
+	// This creates a non privileged ud_av.
+	// To create a privileged ud_av call THH_hob_create_ud_av()
+	if (HH_OK != THHUL_pdm_create_ud_av(hobul_p->hhul_hndl, hhul_pd_hndl, &av, &av_h)) {
+		status = IB_INSUFFICIENT_RESOURCES;
+		goto cleanup;
+	}
+
+	// update PD object count
+	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+
+	avo_p->mark = E_MARK_AV;
+	avo_p->hca_idx = hca_idx;
+	avo_p->pd_idx = pd_idx;
+	avo_p->h_av = av_h;
+
+	if (ph_av) *ph_av = (ib_av_handle_t)avo_p;
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if (avo_p) {
+		avo_p->mark = E_MARK_INVALID;
+		cl_free( avo_p);
+	}
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_query_av (
+	IN		const	ib_av_handle_t				h_av,
+		OUT			ib_av_attr_t				*p_addr_vector,
+		OUT			ib_pd_handle_t				*ph_pd,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_av);
+	UNREFERENCED_PARAMETER(p_addr_vector);
+	UNREFERENCED_PARAMETER(ph_pd);
+	UNREFERENCED_PARAMETER(p_umv_buf);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_query_av not implemented\n"));
+	return IB_UNSUPPORTED;
+#else
+	mlnx_avo_t		*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	VAPI_ud_av_t	av;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	if (p_addr_vector) {
+		if (HH_OK != THHUL_pdm_query_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {
+			status = IB_ERROR;
+			goto cleanup;
+		}
+		mlnx_conv_vapi_av(hobul_p->hh_hndl, &av, p_addr_vector);
+	}
+
+	if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(avo_p->pd_idx);
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_modify_av (
+	IN		const	ib_av_handle_t				h_av,
+	IN		const	ib_av_attr_t				*p_addr_vector,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_av);
+	UNREFERENCED_PARAMETER(p_addr_vector);
+	UNREFERENCED_PARAMETER(p_umv_buf);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_modify_av not implemented\n"));
+	return IB_UNSUPPORTED;
+#else
+	mlnx_avo_t		*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	VAPI_ud_av_t	av;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	cl_memclr(&av, sizeof(av));
+	mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);
+	if (HH_OK != THHUL_pdm_modify_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {
+		status = IB_ERROR;
+		goto cleanup;
+	}
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_destroy_av (
+	IN		const	ib_av_handle_t				h_av)
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_av);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_destroy_av not implemented\n"));
+	return IB_INVALID_CA_HANDLE;
+#else
+	mlnx_avo_t		*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	// This destroys a non privileged ud_av.
+	// To destroy a privileged ud_av call THH_hob_destroy_ud_av()
+	if (HH_OK != THHUL_pdm_destroy_ud_av(hobul_p->hhul_hndl, avo_p->h_av)) {
+		status = IB_ERROR;
+		goto cleanup;
+	}
+
+	// update PD object count
+	cl_atomic_dec(&hobul_p->pd_info_tbl[avo_p->pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", avo_p->pd_idx, hobul_p->pd_info_tbl[avo_p->pd_idx].count));
+
+	avo_p->mark = E_MARK_INVALID;
+	cl_free( avo_p);
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if (avo_p) {
+		avo_p->mark = E_MARK_INVALID;
+		cl_free( avo_p);
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+/*
+* Queue Pair Management Verbs
+*/
+
+ib_api_status_t
+mlnx_create_qp (
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	void						*qp_context,
+	IN		const	ib_qp_create_t				*p_create_attr,
+		OUT			ib_qp_attr_t				*p_qp_attr,
+		OUT			ib_qp_handle_t				*ph_qp,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+	int err;
+	ib_api_status_t		status;
+	struct ib_qp * ib_qp_p;
+	struct mthca_qp *qp_p;
+	struct ib_qp_init_attr qp_init_attr;
+	struct ib_ucontext *context_p = NULL;
+	struct ib_udata *udata_p = NULL;
+	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
+	struct ib_device *ib_dev = ib_pd_p->device;
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)&ib_dev->mdev->ext->hca.hob;
+
+	HCA_ENTER(MLNX_DBG_TRACE);
+
+	// sanity checks
+	if( p_umv_buf && p_umv_buf->command ) {
+		HCA_TRACE (CL_DBG_ERROR, ("User mode is not supported yet\n"));
+		status = IB_UNSUPPORTED;
+		goto err_user_unsupported;
+	}
+
+#ifdef WIN_USER_SUPPORT
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		//TODO: check the below sanity check
+		if ((p_umv_buf->input_size - sizeof (u_int32_t)) !=
+			hca_ul_info->pd_ul_resources_sz ||
+			NULL == p_umv_buf->p_inout_buf) {
+			status = IB_INVALID_PARAMETER;
+			goto cleanup;
+		}
+
+		//TODO: create user context by call to mthca_alloc_ucontext()
+	}
+#endif
+
+	// prepare the parameters
+	RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
+	qp_init_attr.qp_type = p_create_attr->qp_type;
+	qp_init_attr.event_handler = qp_event_handler;
+	qp_init_attr.qp_context = hob_p;
+	qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
+	qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
+	qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
+	qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
+	qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
+	qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
+	qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? 
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + + // create QP + if( p_umv_buf && p_umv_buf->command ) { + //TODO: call uverbs + } + else { + ib_qp_p = ib_create_qp( ib_pd_p, &qp_init_attr ); + if (IS_ERR(ib_qp_p)) { + err = PTR_ERR(ib_qp_p); + HCA_TRACE (CL_DBG_ERROR, ("ib_create_qp failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_qp; + } + } + + // fill the object + qp_p = (struct mthca_qp *)ib_qp_p; + qp_p->qp_context = (void*)qp_context; + + // Query QP to obtain requested attributes + if (p_qp_attr) { + status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf); + if (status != IB_SUCCESS) + goto err_query_qp; + } + + // return the results + if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p; + +#ifdef WIN_USER_SUPPORT + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + /* + * Copy the pd_idx back to user + */ + cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz), + &pd_idx, sizeof (pd_idx)); + p_umv_buf->status = IB_SUCCESS; + } +#endif + + status = IB_SUCCESS; + + +err_query_qp: + if( p_umv_buf && p_umv_buf->command ) { + //TODO: cleanup for user + } + else { + ib_destroy_qp( ib_qp_p ); + } +err_create_qp: +err_user_unsupported: + HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + ib_api_status_t status; + ib_qp_handle_t h_qp; + + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + u_int32_t qp_num; + u_int32_t qp_idx; + u_int32_t send_cq_num; + u_int32_t send_cq_idx; + u_int32_t recv_cq_num; + u_int32_t recv_cq_idx; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HH_qp_init_attr_t hh_qp_init_attr; + HHUL_qp_init_attr_t ul_qp_init_attr; + HHUL_qp_hndl_t hhul_qp_hndl = NULL; + VAPI_qp_cap_t hh_qp_cap; + void *qp_ul_resources_p = NULL; + VAPI_sg_lst_entry_t *send_sge_p = NULL; + VAPI_sg_lst_entry_t *recv_sge_p = NULL; + u_int32_t num_sge; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // The create attributes must be provided + if (!p_create_attr) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + // convert input parameters + cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr)); + mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, NULL); + send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq); + recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq); + send_cq_idx = send_cq_num & hobul_p->cq_idx_mask; + recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + ul_qp_init_attr.pd = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl; + ul_qp_init_attr.sq_cq = 
hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl; + ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl; + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->qp_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + } else { + // for kernel mode calls - allocate app resources. Use prep->call->done sequence + qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz); + if (!qp_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THHUL_qpm_create_qp_prep(hobul_p->hhul_hndl, &ul_qp_init_attr, &hhul_qp_hndl, &hh_qp_cap, qp_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + // TBD: if not same report error to IBAL + ul_qp_init_attr.qp_cap = hh_qp_cap; // struct assign + } + + // Convert HHUL to HH structure (for HH create_qp) + hh_qp_init_attr.pd = pd_idx; + hh_qp_init_attr.rdd = 0; // TBD: RDD + if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL ) + { + // TBD: HH handle from HHUL handle. + CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL ); + } + else + { + hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL; + } + hh_qp_init_attr.sq_cq = send_cq_num; + hh_qp_init_attr.rq_cq = recv_cq_num; + hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type; + hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type; + hh_qp_init_attr.ts_type = ul_qp_init_attr.ts_type; + hh_qp_init_attr.qp_cap = ul_qp_init_attr.qp_cap; // struct assign + + // Allocate the QP (cmdif) + if (HH_OK != THH_hob_create_qp(hobul_p->hh_hndl, &hh_qp_init_attr, qp_ul_resources_p, &qp_num)) { + status = IB_INSUFFICIENT_RESOURCES; + goto cleanup_qp; + } + + if( !(p_umv_buf && p_umv_buf->command) ) + { + // Manage user level resources + if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) { + THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num); + status = IB_ERROR; + goto cleanup_qp; + } + + // Create SQ and RQ iov + num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq; + send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t)); + if (!send_sge_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup_qp; + } + + num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq; + recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t)); + if (!recv_sge_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup_qp; + } + } + + // Save data refs for future use + qp_idx = qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x qp_num 0x%x\n", + hobul_p, hobul_p->qp_idx_mask, qp_idx, qp_num)); + + h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx); + cl_mutex_acquire(&h_qp->mutex); + h_qp->pd_num = pd_idx; + h_qp->hhul_qp_hndl = hhul_qp_hndl; + h_qp->qp_type = p_create_attr->qp_type; + h_qp->sq_signaled = p_create_attr->sq_signaled; + h_qp->qp_context = qp_context; + h_qp->qp_ul_resources_p = qp_ul_resources_p; + h_qp->sq_size = ul_qp_init_attr.qp_cap.max_sg_size_sq; + h_qp->rq_size = ul_qp_init_attr.qp_cap.max_sg_size_rq; + h_qp->send_sge_p = send_sge_p; + h_qp->recv_sge_p = recv_sge_p; + h_qp->qp_num = qp_num; + h_qp->h_sq_cq = &hobul_p->cq_info_tbl[send_cq_idx]; + h_qp->h_rq_cq = &hobul_p->cq_info_tbl[recv_cq_idx]; + h_qp->kernel_mode = !(p_umv_buf && p_umv_buf->command); + h_qp->mark = E_MARK_QP; + 
CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n", + qp_num, qp_idx, send_cq_idx, recv_cq_idx)); + cl_mutex_release(&h_qp->mutex); + // Update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count)); + + // Query QP to obtain requested attributes + if (p_qp_attr) { + if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) + { + if( !(p_umv_buf && p_umv_buf->command) ) + goto cleanup_qp; + else + goto cleanup; + } + } + + if (ph_qp) *ph_qp = h_qp; + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->status = IB_SUCCESS; + /* + * Copy the qp_idx back to user + */ + cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->qp_ul_resources_sz), + &qp_num, sizeof (qp_num)); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_qp: + if (send_sge_p) cl_free( send_sge_p); + if (recv_sge_p) cl_free( recv_sge_p); + if( !(p_umv_buf && p_umv_buf->command) ) + THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl); + +cleanup: + if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p) + cl_free( qp_ul_resources_p); + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_create_spl_qp ( + IN const ib_pd_handle_t h_pd, + IN const uint8_t port_num, + IN const void *qp_context, + IN const ib_qp_create_t *p_create_attr, + OUT ib_qp_attr_t *p_qp_attr, + OUT ib_qp_handle_t *ph_qp ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(qp_context); + UNREFERENCED_PARAMETER(p_create_attr); + UNREFERENCED_PARAMETER(p_qp_attr); + UNREFERENCED_PARAMETER(ph_qp); + UNREFERENCED_PARAMETER(port_num); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_create_spl_qp not implemented\n")); + return IB_UNSUPPORTED; +#else + ib_api_status_t status; + ib_qp_handle_t h_qp; + ci_umv_buf_t *p_umv_buf = NULL; + + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + u_int32_t qp_num; + u_int32_t qp_idx; + u_int32_t send_cq_num; + u_int32_t send_cq_idx; + u_int32_t recv_cq_num; + u_int32_t recv_cq_idx; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HH_qp_init_attr_t hh_qp_init_attr; + HHUL_qp_init_attr_t ul_qp_init_attr; + HHUL_qp_hndl_t hhul_qp_hndl = NULL; + VAPI_special_qp_t vapi_qp_type; + VAPI_qp_cap_t hh_qp_cap; + void *qp_ul_resources_p = NULL; + VAPI_sg_lst_entry_t *send_sge_p = NULL; + VAPI_sg_lst_entry_t *recv_sge_p = NULL; + u_int32_t num_sge; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // The create attributes must be provided + if (!p_create_attr) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + // convert input 
parameters + cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr)); + mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, &vapi_qp_type); + send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq); + recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq); + send_cq_idx = send_cq_num & hobul_p->cq_idx_mask; + recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + ul_qp_init_attr.pd = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl; + ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl; + ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl; + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if (p_umv_buf->input_size != hca_ul_info->qp_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + } else { + // For kernel mode calls - allocate app resources. Use prep->call->done sequence + qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz); + if (!qp_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THHUL_qpm_special_qp_prep(hobul_p->hhul_hndl, + vapi_qp_type, + port_num, + &ul_qp_init_attr, + &hhul_qp_hndl, + &hh_qp_cap, + qp_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + // TBD: if not same report error to IBAL + ul_qp_init_attr.qp_cap = hh_qp_cap; // struct assign + } + + // Convert HHUL to HH structure (for HH create_qp) + hh_qp_init_attr.pd = pd_idx; + hh_qp_init_attr.rdd = 0; // TBD: RDD + if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL ) + { + // TBD: HH handle from HHUL handle. 
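+		// (SRQs are not plumbed through this path yet, so the assert
+		// below only documents the expectation that no SRQ was passed.)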
+		CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );
+	}
+	else
+	{
+		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
+	}
+	hh_qp_init_attr.sq_cq = send_cq_num;
+	hh_qp_init_attr.rq_cq = recv_cq_num;
+	hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;
+	hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;
+	hh_qp_init_attr.ts_type = VAPI_TS_UD;
+	hh_qp_init_attr.qp_cap = ul_qp_init_attr.qp_cap; // struct assign
+
+	// Allocate the QP (cmdif)
+	if (HH_OK != THH_hob_get_special_qp( hobul_p->hh_hndl,
+		vapi_qp_type,
+		port_num,
+		&hh_qp_init_attr,
+		qp_ul_resources_p,
+		&qp_num))
+	{
+		status = IB_ERROR;
+		goto cleanup_qp;
+	}
+
+	if( !(p_umv_buf && p_umv_buf->command) )
+	{
+		// Manage user level resources
+		if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {
+			THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);
+			status = IB_ERROR;
+			goto cleanup_qp;
+		}
+
+		// Create SQ and RQ iov
+		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;
+		send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
+		if (!send_sge_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto cleanup_qp;
+		}
+
+		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;
+		recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
+		if (!recv_sge_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto cleanup_qp;
+		}
+	}
+
+	// Save data refs for future use
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);
+
+	h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);
+	cl_mutex_acquire(&h_qp->mutex);
+	h_qp->pd_num = pd_idx;
+	h_qp->hhul_qp_hndl = hhul_qp_hndl;
+	h_qp->qp_type = p_create_attr->qp_type;
+	h_qp->sq_signaled = p_create_attr->sq_signaled;
+	h_qp->qp_context = qp_context;
+	h_qp->qp_ul_resources_p = qp_ul_resources_p;
+	h_qp->sq_size = ul_qp_init_attr.qp_cap.max_sg_size_sq;
+	h_qp->rq_size = ul_qp_init_attr.qp_cap.max_sg_size_rq;
+	h_qp->send_sge_p = send_sge_p;
+	h_qp->recv_sge_p = recv_sge_p;
+	h_qp->qp_num = qp_num;
+	h_qp->h_sq_cq = &hobul_p->cq_info_tbl[send_cq_idx];
+	h_qp->h_rq_cq = &hobul_p->cq_info_tbl[recv_cq_idx];
+	h_qp->kernel_mode = !(p_umv_buf && p_umv_buf->command);
+	h_qp->mark = E_MARK_QP;
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",
+		qp_num, qp_idx, send_cq_idx, recv_cq_idx));
+	cl_mutex_release(&h_qp->mutex);
+
+	/* Mark the CQs associated with this special QP as being high priority. 
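+	 * Raising the DPC importance gives QP0/QP1 (SMI/GSI) MAD completions
+	 * more timely processing; mlnx_destroy_qp() drops the DPC back to
+	 * MediumImportance once the last special QP on a CQ is destroyed.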
*/
+	cl_atomic_inc( &h_qp->h_sq_cq->spl_qp_cnt );
+	KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, HighImportance );
+	cl_atomic_inc( &h_qp->h_rq_cq->spl_qp_cnt );
+	KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, HighImportance );
+
+	// Update PD object count
+	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+	// Query QP to obtain requested attributes
+	if (p_qp_attr) {
+		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
+			goto cleanup;
+		}
+	}
+
+	if (ph_qp) *ph_qp = h_qp;
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = p_umv_buf->input_size;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_qp:
+	if (send_sge_p) cl_free( send_sge_p);
+	if (recv_sge_p) cl_free( recv_sge_p);
+	if( !(p_umv_buf && p_umv_buf->command) )
+		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);
+
+cleanup:
+	if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p )
+		cl_free( qp_ul_resources_p);
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_modify_qp (
+	IN		const	ib_qp_handle_t				h_qp,
+	IN		const	ib_qp_mod_t					*p_modify_attr,
+		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_qp);
+	UNREFERENCED_PARAMETER(p_modify_attr);
+	UNREFERENCED_PARAMETER(p_qp_attr);
+	UNREFERENCED_PARAMETER(p_umv_buf);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_modify_qp not implemented\n"));
+	return IB_INVALID_CA_HANDLE;
+#else
+	ib_api_status_t		status;
+
+	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t			qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t			qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	HHUL_qp_hndl_t		hhul_qp_hndl;
+	VAPI_qp_attr_mask_t	hh_qp_attr_mask;
+	VAPI_qp_attr_t		hh_qp_attr;
+	VAPI_qp_state_t		hh_qp_state;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("Before acquire mutex to modify qp_idx 0x%x\n",
+		qp_idx));
+
+	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;
+
+	// Obtain current state of QP
+	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, hobul_p->qp_info_tbl[qp_idx].qp_num, &hh_qp_attr))
+	{
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+	hh_qp_state = hh_qp_attr.qp_state; // The current (pre-modify) state
+
+	// Convert the input parameters. Use query result as default (no cl_memset())
+	// cl_memclr(&hh_qp_attr, sizeof(hh_qp_attr));
+	status = mlnx_conv_qp_modify_attr(hobul_p->hh_hndl,
+		hobul_p->qp_info_tbl[qp_idx].qp_type,
+		p_modify_attr, &hh_qp_attr, &hh_qp_attr_mask);
+	if( status != IB_SUCCESS )
+		goto cleanup_locked;
+
+	if (HH_OK != THH_hob_modify_qp(hobul_p->hh_hndl,
+		hobul_p->qp_info_tbl[qp_idx].qp_num,
+		hh_qp_state, &hh_qp_attr, &hh_qp_attr_mask))
+	{
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("After hob_modify_qp qp_idx 0x%x k_mod %d\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode));
+
+	// Notify HHUL of the new (post-modify) state. This is done for kernel-mode calls only
+	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
+		if (HH_OK != THHUL_qpm_modify_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, hh_qp_attr.qp_state))
+		{
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+	}
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	if ((p_qp_attr) && !(p_umv_buf && p_umv_buf->command)) {
+		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
+			goto cleanup;
+		}
+	}
+
+	if ( p_umv_buf && p_umv_buf->command && (! hobul_p->qp_info_tbl[qp_idx].kernel_mode)) {
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+			("mod_qp qp_idx %d umv_buf %p inout_buf %p\n",
+			qp_idx, p_umv_buf, p_umv_buf->p_inout_buf));
+		if (p_umv_buf->p_inout_buf) {
+			p_umv_buf->output_size = sizeof (VAPI_qp_state_t);
+			cl_memcpy (p_umv_buf->p_inout_buf, &(hh_qp_attr.qp_state),
+				(size_t)p_umv_buf->output_size);
+			p_umv_buf->status = IB_SUCCESS;
+		}
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_query_qp (
+	IN		const	ib_qp_handle_t				h_qp,
+		OUT			ib_qp_attr_t				*p_qp_attr,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+	UNREFERENCED_PARAMETER(h_qp);
+	UNREFERENCED_PARAMETER(p_qp_attr);
+	UNREFERENCED_PARAMETER(p_umv_buf);
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_query_qp not implemented\n"));
+	return IB_UNSUPPORTED;
+#else
+	ib_api_status_t		status;
+
+	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t			qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t			qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	VAPI_qp_attr_t		hh_qp_attr;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&h_qp->mutex);
+
+	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, h_qp->qp_num, &hh_qp_attr)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	// Convert query result into IBAL structure (no cl_memset())
+	mlnx_conv_vapi_qp_attr(hobul_p->hh_hndl, &hh_qp_attr, p_qp_attr);
+	p_qp_attr->qp_type = h_qp->qp_type;
+	p_qp_attr->h_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(h_qp->pd_num);
+	p_qp_attr->h_sq_cq = h_qp->h_sq_cq;
+	p_qp_attr->h_rq_cq = h_qp->h_rq_cq;
+	
p_qp_attr->sq_signaled = h_qp->sq_signaled;
+
+	cl_mutex_release(&h_qp->mutex);
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+ib_api_status_t
+mlnx_destroy_qp (
+	IN		const	ib_qp_handle_t				h_qp,
+	IN		const	uint64_t					timewait )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+	ib_api_status_t		status;
+	int err;
+	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+
+	UNUSED_PARAM( timewait );
+
+	// destroy QP
+	if( ib_qp_p->uobject ) {
+		//TODO: call uverbs
+	}
+	else {
+		err = ib_destroy_qp( ib_qp_p );
+		if (err) {
+			HCA_TRACE (CL_DBG_ERROR, ("ib_destroy_qp failed (%d)\n", err));
+			status = errno_to_iberr(err);
+			goto err_destroy_qp;
+		}
+	}
+
+	status = IB_SUCCESS;
+
+err_destroy_qp:
+	HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	HCA_EXIT(MLNX_DBG_TRACE);
+	return status;
+
+#else
+	ib_api_status_t		status;
+
+	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t			qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t			pd_idx = 0;
+	u_int32_t			qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	HHUL_qp_hndl_t		hhul_qp_hndl;
+
+	UNUSED_PARAM( timewait );
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %d qp 0x%x\n", hca_idx, qp_num));
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",
+		hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));
+
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {
+			CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(IB_INVALID_QP_HANDLE)));
+			return IB_SUCCESS; // Already freed
+		}
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;
+	pd_idx = hobul_p->qp_info_tbl[qp_idx].pd_num;
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_locked);
+
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd_idx 0x%x mark %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].mark));
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("Before THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));
+
+	// PREP: no PREP required for destroy_qp
+	if (HH_OK != THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("After THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));
+
+	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
+		if (HH_OK != THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl)) {
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+		if (hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p);
+		if (hobul_p->qp_info_tbl[qp_idx].send_sge_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].send_sge_p);
+		if (hobul_p->qp_info_tbl[qp_idx].recv_sge_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].recv_sge_p);
+	}
+
+	if( h_qp->qp_type == IB_QPT_QP0 || h_qp->qp_type == IB_QPT_QP1 )
+	{
+		if( !cl_atomic_dec( &h_qp->h_sq_cq->spl_qp_cnt ) )
+			KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, MediumImportance );
+		if( !cl_atomic_dec( &h_qp->h_rq_cq->spl_qp_cnt ) )
+			KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, MediumImportance );
+	}
+
+	hobul_p->qp_info_tbl[qp_idx].mark = E_MARK_INVALID;
+	hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p = NULL;
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	// Update PD object count
+	cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+cleanup:
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+/*
+* Completion Queue Management Verbs.
+*/
+
+ib_api_status_t
+mlnx_create_cq (
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	void						*cq_context,
+	IN	OUT			uint32_t					*p_size,
+		OUT			ib_cq_handle_t				*ph_cq,
+	IN	OUT			ci_umv_buf_t				*p_umv_buf )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+	int err;
+	ib_api_status_t		status;
+	struct ib_cq *ib_cq_p;
+	struct mthca_cq *cq_p;
+	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
+	struct ib_device	*ib_dev = IBDEV_FROM_HOB( hob_p );
+	struct ib_ucontext *context_p = NULL;
+	struct ib_udata *udata_p = NULL;
+
+	HCA_ENTER(MLNX_DBG_TRACE);
+
+	// sanity checks
+	if( p_umv_buf && p_umv_buf->command ) {
+		HCA_TRACE (CL_DBG_ERROR, ("User mode is not supported yet\n"));
+		status = IB_UNSUPPORTED;
+		goto err_user_unsupported;
+	}
+
+#ifdef WIN_USER_SUPPORT
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		//TODO: check the below sanity check
+		if ((p_umv_buf->input_size - sizeof (u_int32_t)) !=
+			hca_ul_info->pd_ul_resources_sz ||
+			NULL == p_umv_buf->p_inout_buf) {
+			status = IB_INVALID_PARAMETER;
+			goto cleanup;
+		}
+
+		//TODO: create user context by call to mthca_alloc_ucontext()
+	}
+#endif
+
+	// create CQ
+	if( p_umv_buf && p_umv_buf->command ) {
+		//TODO: call uverbs
+	}
+	else {
+		ib_cq_p = ib_create_cq(ib_dev,
+			cq_comp_handler, cq_event_handler,
+			hob_p, *p_size );
+		if (IS_ERR(ib_cq_p)) {
+			err = PTR_ERR(ib_cq_p);
+			HCA_TRACE (CL_DBG_ERROR, ("ib_create_cq failed (%d)\n", err));
+			status = errno_to_iberr(err);
+			goto err_create_cq;
+		}
+	}
+
+	// fill the object
+	cq_p = (struct mthca_cq *)ib_cq_p;
+	cq_p->cq_context = (void*)cq_context;
+
+	// return the result
+	if (ph_cq) *ph_cq = (ib_cq_handle_t)ib_cq_p;
+
+#ifdef WIN_USER_SUPPORT
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = p_umv_buf->input_size;
+		/*
+		 * Copy the pd_idx back to user
+		 */
+		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz),
+			&pd_idx, sizeof (pd_idx));
+		p_umv_buf->status = IB_SUCCESS;
+	}
+#endif
+
+	status = IB_SUCCESS;
+
+err_create_cq:
+err_user_unsupported:
+	
HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status))); + HCA_EXIT(MLNX_DBG_TRACE); + return status; + +#else + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + u_int32_t cq_idx; + u_int32_t cq_num; + u_int32_t cq_size = 0; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HHUL_cq_hndl_t hhul_cq_hndl = NULL; + void *cq_ul_resources_p = NULL; + MOSAL_protection_ctx_t prot_ctx; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + hobul_p = mlnx_hobs_get_hobul(hob_p); + if (NULL == hobul_p) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // The size must be provided + if (!p_size) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + // TBD: verify that the number requested does not exceed to maximum allowed + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->cq_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + /* get the current protection context */ + prot_ctx = MOSAL_get_current_prot_ctx(); + } else { + // for kernel mode calls - allocate app resources. Use prep->call->done sequence + cq_ul_resources_p = cl_zalloc( hca_ul_info->cq_ul_resources_sz); + if (!cq_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + if (HH_OK != THHUL_cqm_create_cq_prep(hobul_p->hhul_hndl, *p_size, &hhul_cq_hndl, &cq_size, cq_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + /* get the current protection context */ + prot_ctx = MOSAL_get_kernel_prot_ctx(); + } + + // Allocate the CQ (cmdif) + if (HH_OK != THH_hob_create_cq(hobul_p->hh_hndl, prot_ctx, cq_ul_resources_p, &cq_num)) { + status = IB_INSUFFICIENT_RESOURCES; + goto cleanup_cq; + } + + if( !(p_umv_buf && p_umv_buf->command) ) + { + // Manage user level resources + if (HH_OK != THHUL_cqm_create_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl, cq_num, cq_ul_resources_p)) { + THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num); + status = IB_ERROR; + goto cleanup_cq; + } + } + + // Save data refs for future use + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_ERROR, cleanup_cq); + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + hobul_p->cq_info_tbl[cq_idx].hca_idx = hob_p->index; + hobul_p->cq_info_tbl[cq_idx].cq_num = cq_num; +// hobul_p->cq_info_tbl[cq_idx].pd_num = pd_idx; + hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl = hhul_cq_hndl; + hobul_p->cq_info_tbl[cq_idx].cq_context = cq_context; + hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = cq_ul_resources_p; + hobul_p->cq_info_tbl[cq_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command); + hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_CQ; + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + // Update CA object count + cl_atomic_inc(&hobul_p->count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("HCA %d count %d\n", h_ca->index, hobul_p->count)); + + *p_size = cq_size; + if (ph_cq) *ph_cq = (ib_cq_handle_t)CQ_HNDL_FROM_CQ(cq_idx); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->status = IB_SUCCESS; + /* + * Copy the cq_idx back to user + */ + cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->cq_ul_resources_sz), + 
&cq_num, sizeof (cq_num)); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_cq: + THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl); + +cleanup: + if( !(p_umv_buf && p_umv_buf->command) && cq_ul_resources_p ) + cl_free( cq_ul_resources_p); + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_resize_cq ( + IN const ib_cq_handle_t h_cq, + IN OUT uint32_t *p_size, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(p_size); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_resize_cq not implemented\n")); + return IB_UNSUPPORTED; +#else + ib_api_status_t status; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + + HHUL_cq_hndl_t hhul_cq_hndl; + void *cq_ul_resources_p = NULL; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (!p_size) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf ) + { + status = IB_INVALID_PARAMETER; + goto cleanup_locked; + } + cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + } else { + // for kernel mode calls - obtain the saved app resources. 
Use prep->call->done sequence + cq_ul_resources_p = hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p; + + if (HH_OK != THHUL_cqm_resize_cq_prep( + hobul_p->hhul_hndl, hhul_cq_hndl, + *p_size, p_size, cq_ul_resources_p)) + { + status = IB_ERROR; + goto cleanup_locked; + } + } + + if (HH_OK != THH_hob_resize_cq(hobul_p->hh_hndl, cq_num, cq_ul_resources_p)) { + status = IB_ERROR; + goto cleanup_locked; + } + + // DONE: when called on behalf of kernel module + if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) { + if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl, cq_ul_resources_p)) + { + status = IB_ERROR; + goto cleanup_locked; + } + } + + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + +cleanup: + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_query_cq ( + IN const ib_cq_handle_t h_cq, + OUT uint32_t *p_size, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ +#ifndef WIN_TO_BE_CHANGED + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(p_size); + UNREFERENCED_PARAMETER(p_umv_buf); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mlnx_query_cq not implemented\n")); + return IB_UNSUPPORTED; +#else + ib_api_status_t status; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (!p_size) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if (HH_OK != THH_hob_query_cq(hobul_p->hh_hndl, cq_num, p_size)) { + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + +cleanup: + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +#endif +} + +ib_api_status_t +mlnx_destroy_cq ( + IN const ib_cq_handle_t h_cq) +{ +#ifndef WIN_TO_BE_CHANGED + + ib_api_status_t status; + int err; + struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq; + + // destroy CQ + if( ib_cq_p->uobject ) { + //TODO: call uverbs + } + else { + err = ib_destroy_cq( ib_cq_p ); + if (err) { + 
			HCA_TRACE(CL_DBG_ERROR, ("ib_destroy_cq failed (%d)\n", err));
+			status = errno_to_iberr(err);
+			goto err_destroy_cq;
+		}
+	}
+
+	status = IB_SUCCESS;
+
+err_destroy_cq:
+	if (status != IB_SUCCESS)	/* don't log an error on the success path */
+		HCA_TRACE_ERR(CL_DBG_ERROR, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	HCA_EXIT(MLNX_DBG_TRACE);
+	return status;
+
+#else
+	ib_api_status_t		status;
+
+	u_int32_t	hca_idx = CQ_HCA_FROM_HNDL(h_cq);
+	u_int32_t	cq_num  = CQ_NUM_FROM_HNDL(h_cq);
+	u_int32_t	cq_idx;
+//	u_int32_t	pd_idx = 0;
+	mlnx_hobul_t	*hobul_p;
+	HHUL_cq_hndl_t	hhul_cq_hndl;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_CQ_HANDLE;
+		goto cleanup;
+	}
+
+	cq_idx = cq_num & hobul_p->cq_idx_mask;
+	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
+	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
+		status = IB_INVALID_CQ_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);
+
+	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
+//	pd_idx = hobul_p->cq_info_tbl[cq_idx].pd_num;
+//	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup);
+//	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+//		status = IB_INVALID_PD_HANDLE;
+//		goto cleanup_locked;
+//	}
+
+	// PREP: no PREP required for destroy_cq
+	if (HH_OK != THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {
+		if (HH_OK != THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl)) {
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+		if (hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p)
+			cl_free( hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p);
+	}
+
+	hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_INVALID;
+	hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = NULL;
+	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);
+
+	// Update CA object count
+	cl_atomic_dec(&hobul_p->count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CA %d count %d\n", hca_idx, hobul_p->count));
+
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);
+
+cleanup:
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+#endif
+}
+
+
+void
+setup_ci_interface(
+	IN	const	ib_net64_t	ca_guid,
+	IN	OUT	ci_interface_t	*p_interface )
+{
+	cl_memclr(p_interface, sizeof(*p_interface));
+
+	/* Guid of the CA. */
+	p_interface->guid = ca_guid;
+
+	/* Version of this interface. */
+	p_interface->version = VERBS_VERSION;
+
+	/* UVP name */
+	cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);
+
+	CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("UVP filename %s\n", p_interface->libname));
+
+	/* The real interface.
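+	 *
+	 * Illustration only (not part of the patch; 'ci' is a hypothetical
+	 * local): IBAL receives this table and reaches the driver through it:
+	 *
+	 *	ci_interface_t ci;
+	 *	setup_ci_interface(ca_guid, &ci);
+	 *	// IBAL then dispatches via ci.open_ca, ci.create_qp, etc.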
*/ + p_interface->open_ca = mlnx_open_ca; + p_interface->query_ca = mlnx_query_ca; + p_interface->modify_ca = mlnx_modify_ca; // ++ + p_interface->close_ca = mlnx_close_ca; + p_interface->um_open_ca = mlnx_um_open; + p_interface->um_close_ca = mlnx_um_close; + + p_interface->allocate_pd = mlnx_allocate_pd; + p_interface->deallocate_pd = mlnx_deallocate_pd; + + p_interface->create_av = mlnx_create_av; + p_interface->query_av = mlnx_query_av; + p_interface->modify_av = mlnx_modify_av; + p_interface->destroy_av = mlnx_destroy_av; + + p_interface->create_qp = mlnx_create_qp; + p_interface->create_spl_qp = mlnx_create_spl_qp; + p_interface->modify_qp = mlnx_modify_qp; + p_interface->query_qp = mlnx_query_qp; + p_interface->destroy_qp = mlnx_destroy_qp; + + p_interface->create_cq = mlnx_create_cq; + p_interface->resize_cq = mlnx_resize_cq; + p_interface->query_cq = mlnx_query_cq; + p_interface->destroy_cq = mlnx_destroy_cq; + + p_interface->local_mad = mlnx_local_mad; + + p_interface->vendor_call = fw_access_ctrl; + + mlnx_memory_if(p_interface); + mlnx_direct_if(p_interface); + mlnx_mcast_if(p_interface); + + + return; +} + + diff --git a/branches/MTHCA/hw/mthca/kernel/hca_vp.c b/branches/MTHCA/hw/mthca/kernel/hca_vp.c new file mode 100644 index 00000000..8e1db869 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_vp.c @@ -0,0 +1,19 @@ +#include "hca_driver.h" + +/*leo: structure convertion functions */ + +static inline hca_dev_ext_t *mlnx_hca_to_dev_ext(mlnx_hca_t *hca) { + return container_of(hca, hca_dev_ext_t, hca); +} + +#define pdev_to_mlnx_hca(pdev) pdev_to_mlnx_hca(pdev) +#define pdev_to_dev_ext(pdev) mlnx_hca_to_dev_ext(pdev_to_mlnx_hca(pdev)) + +//#define dev_ext_to_ibdev(p_ext) (&dev_ext_to_mdev(p_ext)->ib_dev) + + +PDEVICE_OBJECT hca_ibdev_to_pdo(void *ib_dev) +{ + struct pci_dev *pdev = mthca_ibdev_to_pdev(ib_dev); + return pdev_to_dev_ext(pdev)->cl_ext.p_self_do; +} diff --git a/branches/MTHCA/hw/mthca/kernel/hca_vp.h b/branches/MTHCA/hw/mthca/kernel/hca_vp.h new file mode 100644 index 00000000..72ee40e9 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/hca_vp.h @@ -0,0 +1,7 @@ +#ifndef HCA_VP_H +#define HCA_VP_H + + +PDEVICE_OBJECT hca_ibdev_to_pdo(void *ib_dev); + +#endif diff --git a/branches/MTHCA/hw/mthca/kernel/mt_cache.c b/branches/MTHCA/hw/mthca/kernel/mt_cache.c new file mode 100644 index 00000000..f25384de --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mt_cache.c @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: cache.c 2918 2005-07-27 21:04:40Z roland $ + */ + +#include +#include "mthca_dev.h" +#include + +#include "ib_cache.h" +#include "hca_vp.h" + +struct ib_pkey_cache { + int table_len; + u16 table[0]; +}; + +struct ib_gid_cache { + int table_len; + union ib_gid table[0]; +}; + +struct ib_update_work { + PIO_WORKITEM work_item; + struct ib_device *device; + u8 port_num; +}; + +int ib_get_cached_gid(struct ib_device *device, + u8 port_num, + int index, + union ib_gid *gid) +{ + struct ib_gid_cache *cache; + int ret = 0; + + if (port_num < start_port(device) || port_num > end_port(device)) + return -EINVAL; + + read_lock_irqsave(&device->cache.lock); + + cache = device->cache.gid_cache[port_num - start_port(device)]; + + if (index < 0 || index >= cache->table_len) + ret = -EINVAL; + else + *gid = cache->table[index]; + + read_unlock_irqrestore(&device->cache.lock); + + return ret; +} +EXPORT_SYMBOL(ib_get_cached_gid); + +int ib_find_cached_gid(struct ib_device *device, + union ib_gid *gid, + u8 *port_num, + u16 *index) +{ + struct ib_gid_cache *cache; + int p, i; + int ret = -ENOENT; + + *port_num = -1; + if (index) + *index = -1; + + read_lock_irqsave(&device->cache.lock); + + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + cache = device->cache.gid_cache[p]; + for (i = 0; i < cache->table_len; ++i) { + if (!memcmp(gid, &cache->table[i], sizeof *gid)) { + *port_num = p + start_port(device); + if (index) + *index = (u16)i; + ret = 0; + goto found; + } + } + } +found: + read_unlock_irqrestore(&device->cache.lock); + + return ret; +} +EXPORT_SYMBOL(ib_find_cached_gid); + +int ib_get_cached_pkey(struct ib_device *device, + u8 port_num, + int index, + u16 *pkey) +{ + struct ib_pkey_cache *cache; + int ret = 0; + + if (port_num < start_port(device) || port_num > end_port(device)) + return -EINVAL; + + read_lock_irqsave(&device->cache.lock); + + cache = device->cache.pkey_cache[port_num - start_port(device)]; + + if (index < 0 || index >= cache->table_len) + ret = -EINVAL; + else + *pkey = cache->table[index]; + + read_unlock_irqrestore(&device->cache.lock); + + return ret; +} +EXPORT_SYMBOL(ib_get_cached_pkey); + +int ib_find_cached_pkey(struct ib_device *device, + u8 port_num, + u16 pkey, + u16 *index) +{ + struct ib_pkey_cache *cache; + int i; + int ret = -ENOENT; + + if (port_num < start_port(device) || port_num > end_port(device)) + return -EINVAL; + + read_lock_irqsave(&device->cache.lock); + + cache = device->cache.pkey_cache[port_num - start_port(device)]; + + *index = -1; + + for (i = 0; i < cache->table_len; ++i) + if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { + *index = (u16)i; + ret = 0; + break; + } + + read_unlock_irqrestore(&device->cache.lock); + + return ret; +} +EXPORT_SYMBOL(ib_find_cached_pkey); + +static void ib_cache_update(struct ib_device *device, + u8 port) +{ + struct ib_port_attr *tprops = NULL; + struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; + struct ib_gid_cache *gid_cache = NULL, *old_gid_cache; + int i; + int ret; + + tprops = 
kmalloc(sizeof *tprops, GFP_KERNEL); + if (!tprops) + return; + + ret = ib_query_port(device, port, tprops); + if (ret) { + printk(KERN_WARNING "ib_query_port failed (%d) for %s\n", + ret, device->name); + goto err; + } + + pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * + sizeof *pkey_cache->table, GFP_KERNEL); + if (!pkey_cache) + goto err; + + pkey_cache->table_len = tprops->pkey_tbl_len; + + gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len * + sizeof *gid_cache->table, GFP_KERNEL); + if (!gid_cache) + goto err; + + gid_cache->table_len = tprops->gid_tbl_len; + + for (i = 0; i < pkey_cache->table_len; ++i) { + ret = ib_query_pkey(device, port, (u16)i, pkey_cache->table + i); + if (ret) { + printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n", + ret, device->name, i); + goto err; + } + } + + for (i = 0; i < gid_cache->table_len; ++i) { + ret = ib_query_gid(device, port, i, gid_cache->table + i); + if (ret) { + printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n", + ret, device->name, i); + goto err; + } + } + + write_lock_irq(&device->cache.lock); + + old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + + device->cache.pkey_cache[port - start_port(device)] = pkey_cache; + device->cache.gid_cache [port - start_port(device)] = gid_cache; + + write_unlock_irq(&device->cache.lock); + + kfree(old_pkey_cache); + kfree(old_gid_cache); + kfree(tprops); + return; + +err: + kfree(pkey_cache); + kfree(gid_cache); + kfree(tprops); +} + +static void ib_cache_task(void *work_ptr) +{ + struct ib_update_work *work = work_ptr; + + ib_cache_update(work->device, work->port_num); + kfree(work); +} + +#ifdef LINUX_TO_BE_CHANGED +static void ib_cache_event(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct ib_update_work *work; + + if (event->event == IB_EVENT_PORT_ERR || + event->event == IB_EVENT_PORT_ACTIVE || + event->event == IB_EVENT_LID_CHANGE || + event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_SM_CHANGE) { + work = kmalloc(sizeof *work, GFP_ATOMIC); + if (work) { + INIT_WORK(&work->work, ib_cache_task, work); + work->device = event->device; + work->port_num = event->element.port_num; + schedule_work(&work->work); + } + } +} + +#else + + +/* leo: wrapper for Linux work_item callback */ +VOID + ib_work_item ( + IN PDEVICE_OBJECT DeviceObject, + IN PVOID Context + ) +{ + struct ib_update_work *work = (struct ib_update_work *)Context; + ib_cache_task(Context); + IoFreeWorkItem(work->work_item); +} + +static void ib_cache_event(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct ib_update_work *work; + + if (event->event == IB_EVENT_PORT_ERR || + event->event == IB_EVENT_PORT_ACTIVE || + event->event == IB_EVENT_LID_CHANGE || + event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_SM_CHANGE) { + work = kmalloc(sizeof *work, GFP_ATOMIC); + //???: what will happen on allocation failure (leo) + if (work) { + work->device = event->device; + work->port_num = event->element.port_num; + + { // schedule a work item to work + // get PDO + PDEVICE_OBJECT pdo = handler->device->mdev->ext->cl_ext.p_self_do; + + // allocate work item + work->work_item = IoAllocateWorkItem(pdo); + if (work->work_item == NULL) { + //TODO: at least - print error. 
Need to return code, but the function is void + } + else { // schedule the work + IoQueueWorkItem( + work->work_item, + ib_work_item, + DelayedWorkQueue, + work + ); + } + } + + } + } +} + +#endif + +static void ib_cache_setup_one(struct ib_device *device) +{ + int p; + + rwlock_init(&device->cache.lock); + + device->cache.pkey_cache = + kmalloc(sizeof *device->cache.pkey_cache * + (end_port(device) - start_port(device) + 1), GFP_KERNEL); + device->cache.gid_cache = + kmalloc(sizeof *device->cache.pkey_cache * + (end_port(device) - start_port(device) + 1), GFP_KERNEL); + + if (!device->cache.pkey_cache || !device->cache.gid_cache) { + printk(KERN_WARNING "Couldn't allocate cache " + "for %s\n", device->name); + goto err; + } + + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + device->cache.pkey_cache[p] = NULL; + device->cache.gid_cache [p] = NULL; + ib_cache_update(device, p + start_port(device)); + } + + INIT_IB_EVENT_HANDLER(&device->cache.event_handler, + device, ib_cache_event); + if (ib_register_event_handler(&device->cache.event_handler)) + goto err_cache; + + return; + +err_cache: + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + kfree(device->cache.pkey_cache[p]); + kfree(device->cache.gid_cache[p]); + } + +err: + kfree(device->cache.pkey_cache); + kfree(device->cache.gid_cache); +} + +static void ib_cache_cleanup_one(struct ib_device *device) +{ + int p; + + ib_unregister_event_handler(&device->cache.event_handler); +#ifdef LINUX_TO_BE_CHANGED + //TODO: how to do that ? + flush_scheduled_work(); +#endif + + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + kfree(device->cache.pkey_cache[p]); + kfree(device->cache.gid_cache[p]); + } + + kfree(device->cache.pkey_cache); + kfree(device->cache.gid_cache); +} + +#ifdef LINUX_TO_BE_REMOVED +static struct ib_client cache_client = { + .name = "cache", + .add = ib_cache_setup_one, + .remove = ib_cache_cleanup_one +}; +#else +static struct ib_client cache_client = { "cache", ib_cache_setup_one, ib_cache_cleanup_one }; +#endif + +int __init ib_cache_setup(void) +{ + return ib_register_client(&cache_client); +} + +void __exit ib_cache_cleanup(void) +{ + ib_unregister_client(&cache_client); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mt_device.c b/branches/MTHCA/hw/mthca/kernel/mt_device.c new file mode 100644 index 00000000..31cba28e --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mt_device.c @@ -0,0 +1,601 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: device.c 2730 2005-06-28 16:43:03Z sean.hefty $ + */ + +#include "hca_driver.h" +#include "ib_verbs.h" +#include "ib_cache.h" + +struct ib_client_data { + struct list_head list; + struct ib_client *client; + void * data; +}; + +static LIST_HEAD(device_list); +static LIST_HEAD(client_list); + +/* + * device_mutex protects access to both device_list and client_list. + * There's no real point to using multiple locks or something fancier + * like an rwsem: we always access both lists, and we're always + * modifying one list or the other list. In any case this is not a + * hot path so there's no point in trying to optimize. + */ +KMUTEX device_mutex; + +static int ib_device_check_mandatory(struct ib_device *device) +{ +#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } + static const struct { + size_t offset; + char *name; + } mandatory_table[] = { + IB_MANDATORY_FUNC(query_device), + IB_MANDATORY_FUNC(query_port), + IB_MANDATORY_FUNC(query_pkey), + IB_MANDATORY_FUNC(query_gid), + IB_MANDATORY_FUNC(alloc_pd), + IB_MANDATORY_FUNC(dealloc_pd), + IB_MANDATORY_FUNC(create_ah), + IB_MANDATORY_FUNC(destroy_ah), + IB_MANDATORY_FUNC(create_qp), + IB_MANDATORY_FUNC(modify_qp), + IB_MANDATORY_FUNC(destroy_qp), + IB_MANDATORY_FUNC(post_send), + IB_MANDATORY_FUNC(post_recv), + IB_MANDATORY_FUNC(create_cq), + IB_MANDATORY_FUNC(destroy_cq), + IB_MANDATORY_FUNC(poll_cq), + IB_MANDATORY_FUNC(req_notify_cq), + IB_MANDATORY_FUNC(get_dma_mr), + IB_MANDATORY_FUNC(dereg_mr) + }; + int i; + + for (i = 0; i < sizeof mandatory_table / sizeof mandatory_table[0]; ++i) { + if (!*(void **) ((u8 *) device + mandatory_table[i].offset)) { + printk(KERN_WARNING "Device %s is missing mandatory function %s\n", + device->name, mandatory_table[i].name); + return -EINVAL; + } + } + + return 0; +} + +static struct ib_device *__ib_device_get_by_name(const char *name) +{ + struct ib_device *device; + + list_for_each_entry(device, &device_list, core_list,struct ib_device) + if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX)) + return device; + + return NULL; +} + +static int __extract_number(char *dest_str, const char *format, int *num) +{ + char *ptr; + for (ptr = dest_str; *ptr; ptr++) { + if (*ptr >= '0' && *ptr <= '9') { + *num = atoi(ptr); + return 1; + } + } + return 0; +} +static int alloc_name(char *name) +{ + long *inuse; + char buf[IB_DEVICE_NAME_MAX]; + struct ib_device *device; + int i; + + inuse = (long *) get_zeroed_page(GFP_KERNEL); + if (!inuse) + return -ENOMEM; + + list_for_each_entry(device, &device_list, core_list,struct ib_device) { + if (!__extract_number(device->name, name, &i)) + continue; + if (i < 0 || i >= PAGE_SIZE * 8) + continue; + snprintf(buf, sizeof buf, name, i); + if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX)) + set_bit(i, inuse); + } + + i = find_first_zero_bit(inuse, PAGE_SIZE * 8); + free_page(inuse); + snprintf(buf, sizeof buf, name, i); + + if (__ib_device_get_by_name(buf)) + return -ENFILE; + + strlcpy(name, buf, IB_DEVICE_NAME_MAX); + return 0; +} + +#ifdef 
LINUX_TO_BE_REMOVED +/** + * ib_alloc_device - allocate an IB device struct + * @size:size of structure to allocate + * + * Low-level drivers should use ib_alloc_device() to allocate &struct + * ib_device. @size is the size of the structure to be allocated, + * including any private data used by the low-level driver. + * ib_dealloc_device() must be used to free structures allocated with + * ib_alloc_device(). + */ +struct ib_device *ib_alloc_device(size_t size) +{ + void *dev; + + BUG_ON(size < sizeof (struct ib_device)); + + dev = kmalloc(size, GFP_KERNEL); + if (!dev) + return NULL; + + RtlZeroMemory(dev, size); + + return dev; +} + + +/** + * ib_dealloc_device - free an IB device struct + * @device:structure to free + * + * Free a structure allocated with ib_alloc_device(). + */ +void ib_dealloc_device(struct ib_device *device) +{ + if (device->reg_state == IB_DEV_UNINITIALIZED) { + kfree(device); + return; + } + + BUG_ON(device->reg_state != IB_DEV_UNREGISTERED); + + ib_device_unregister_sysfs(device); +} +#endif + + +static int add_client_context(struct ib_device *device, struct ib_client *client) +{ + struct ib_client_data *context; + + context = kmalloc(sizeof *context, GFP_KERNEL); + if (!context) { + printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n", + device->name, client->name); + return -ENOMEM; + } + + context->client = client; + context->data = NULL; + + spin_lock_irqsave(&device->client_data_lock); + list_add(&context->list, &device->client_data_list); + spin_unlock_irqrestore(&device->client_data_lock); + + return 0; +} + +/** + * ib_register_device - Register an IB device with IB core + * @device:Device to register + * + * Low-level drivers use ib_register_device() to register their + * devices with the IB core. All registered clients will receive a + * callback for each device that is added. @device must be allocated + * with ib_alloc_device(). + */ +int ib_register_device(struct ib_device *device) +{ + int ret; + + down(&device_mutex); + + if (strchr(device->name, '%')) { + ret = alloc_name(device->name); + if (ret) + goto out; + } + + if (ib_device_check_mandatory(device)) { + ret = -EINVAL; + goto out; + } + + INIT_LIST_HEAD(&device->event_handler_list); + INIT_LIST_HEAD(&device->client_data_list); + spin_lock_init(&device->event_handler_lock); + spin_lock_init(&device->client_data_lock); + +#ifdef LINUX_TO_BE_REMOVED + ret = ib_device_register_sysfs(device); + if (ret) { + printk(KERN_WARNING "Couldn't register device %s with driver model\n", + device->name); + goto out; + } +#endif + + list_add_tail(&device->core_list, &device_list); + + { + struct ib_client *client; + + list_for_each_entry(client, &client_list, list,struct ib_client) + if (client->add && !add_client_context(device, client)) + client->add(device); + } + + out: + up(&device_mutex); + return ret; +} + + +/** + * ib_unregister_device - Unregister an IB device + * @device:Device to unregister + * + * Unregister an IB device. All clients will receive a remove callback. 
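+ *
+ * For illustration only (my_client, my_add and my_remove are hypothetical
+ * names, not part of this patch), a client pairs the callbacks like this:
+ *
+ *	static struct ib_client my_client = { "my_client", my_add, my_remove };
+ *	ib_register_client(&my_client);
+ *	...
+ *	ib_unregister_client(&my_client);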
+ */ +void ib_unregister_device(struct ib_device *device) +{ + struct ib_client *client; + struct ib_client_data *context, *tmp; + + down(&device_mutex); + + list_for_each_entry_reverse(client, &client_list, list,struct ib_client) + if (client->remove) + client->remove(device); + + list_del(&device->core_list); + + up(&device_mutex); + + spin_lock_irqsave(&device->client_data_lock); + list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data) + kfree(context); + spin_unlock_irqrestore(&device->client_data_lock); + +} + + +/** + * ib_register_client - Register an IB client + * @client:Client to register + * + * Upper level users of the IB drivers can use ib_register_client() to + * register callbacks for IB device addition and removal. When an IB + * device is added, each registered client's add method will be called + * (in the order the clients were registered), and when a device is + * removed, each client's remove method will be called (in the reverse + * order that clients were registered). In addition, when + * ib_register_client() is called, the client will receive an add + * callback for all devices already registered. + */ +int ib_register_client(struct ib_client *client) +{ + struct ib_device *device; + + down(&device_mutex); + + list_add_tail(&client->list, &client_list); + list_for_each_entry(device, &device_list, core_list,struct ib_device) + if (client->add && !add_client_context(device, client)) + client->add(device); + + up(&device_mutex); + + return 0; +} + + +/** + * ib_unregister_client - Unregister an IB client + * @client:Client to unregister + * + * Upper level users use ib_unregister_client() to remove their client + * registration. When ib_unregister_client() is called, the client + * will receive a remove callback for each IB device still registered. + */ +void ib_unregister_client(struct ib_client *client) +{ + struct ib_client_data *context, *tmp; + struct ib_device *device; + + down(&device_mutex); + + list_for_each_entry(device, &device_list, core_list,struct ib_device) { + if (client->remove) + client->remove(device); + + spin_lock_irqsave(&device->client_data_lock); + list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data) + if (context->client == client) { + list_del(&context->list); + kfree(context); + } + spin_unlock_irqrestore(&device->client_data_lock); + } + list_del(&client->list); + + up(&device_mutex); +} + + +/** + * ib_get_client_data - Get IB client context + * @device:Device to get context for + * @client:Client to get context for + * + * ib_get_client_data() returns client context set with + * ib_set_client_data(). + */ +void *ib_get_client_data(struct ib_device *device, struct ib_client *client) +{ + struct ib_client_data *context; + void *ret = NULL; + + spin_lock_irqsave(&device->client_data_lock); + list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data) + if (context->client == client) { + ret = context->data; + break; + } + spin_unlock_irqrestore(&device->client_data_lock); + + return ret; +} + + +/** + * ib_set_client_data - Get IB client context + * @device:Device to set context for + * @client:Client to set context for + * @data:Context to set + * + * ib_set_client_data() sets client context that can be retrieved with + * ib_get_client_data(). 
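+ *
+ * A minimal sketch (my_client and my_ctx are hypothetical names):
+ *
+ *	ib_set_client_data(device, &my_client, my_ctx);
+ *	...
+ *	my_ctx = ib_get_client_data(device, &my_client);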
+ */ +void ib_set_client_data(struct ib_device *device, struct ib_client *client, + void *data) +{ + struct ib_client_data *context; + + spin_lock_irqsave(&device->client_data_lock); + list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data) + if (context->client == client) { + context->data = data; + goto out; + } + + printk(KERN_WARNING "No client context found for %s/%s\n", + device->name, client->name); + +out: + spin_unlock_irqrestore(&device->client_data_lock); +} + + +/** + * ib_register_event_handler - Register an IB event handler + * @event_handler:Handler to register + * + * ib_register_event_handler() registers an event handler that will be + * called back when asynchronous IB events occur (as defined in + * chapter 11 of the InfiniBand Architecture Specification). This + * callback may occur in interrupt context. + */ +int ib_register_event_handler (struct ib_event_handler *event_handler) +{ + + spin_lock_irqsave(&event_handler->device->event_handler_lock); + list_add_tail(&event_handler->list, + &event_handler->device->event_handler_list); + spin_unlock_irqrestore(&event_handler->device->event_handler_lock); + + return 0; +} + + +/** + * ib_unregister_event_handler - Unregister an event handler + * @event_handler:Handler to unregister + * + * Unregister an event handler registered with + * ib_register_event_handler(). + */ +int ib_unregister_event_handler(struct ib_event_handler *event_handler) +{ + spin_lock_irqsave(&event_handler->device->event_handler_lock); + list_del(&event_handler->list); + spin_unlock_irqrestore(&event_handler->device->event_handler_lock); + + return 0; +} + + +/** + * ib_dispatch_event - Dispatch an asynchronous event + * @event:Event to dispatch + * + * Low-level drivers must call ib_dispatch_event() to dispatch the + * event to all registered event handlers when an asynchronous event + * occurs. + */ +void ib_dispatch_event(struct ib_event *event) +{ + struct ib_event_handler *handler; + + spin_lock_irqsave(&event->device->event_handler_lock); + + list_for_each_entry(handler, &event->device->event_handler_list, list,struct ib_event_handler) + handler->handler(handler, event); + + spin_unlock_irqrestore(&event->device->event_handler_lock); +} + + +/** + * ib_query_device - Query IB device attributes + * @device:Device to query + * @device_attr:Device attributes + * + * ib_query_device() returns the attributes of a device through the + * @device_attr pointer. + */ +int ib_query_device(struct ib_device *device, + struct ib_device_attr *device_attr) +{ + return device->query_device(device, device_attr); +} + + +/** + * ib_query_port - Query IB port attributes + * @device:Device to query + * @port_num:Port number to query + * @port_attr:Port attributes + * + * ib_query_port() returns the attributes of a port through the + * @port_attr pointer. + */ +int ib_query_port(struct ib_device *device, + u8 port_num, + struct ib_port_attr *port_attr) +{ + return device->query_port(device, port_num, port_attr); +} + + +/** + * ib_query_gid - Get GID table entry + * @device:Device to query + * @port_num:Port number to query + * @index:GID table index to query + * @gid:Returned GID + * + * ib_query_gid() fetches the specified GID table entry. 
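+ *
+ * Sketch of a typical call (port 1, table index 0 assumed):
+ *
+ *	union ib_gid gid;
+ *	if (!ib_query_gid(device, 1, 0, &gid))
+ *		... use gid ...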
+ */ +int ib_query_gid(struct ib_device *device, + u8 port_num, int index, union ib_gid *gid) +{ + return device->query_gid(device, port_num, index, gid); +} + + +/** + * ib_query_pkey - Get P_Key table entry + * @device:Device to query + * @port_num:Port number to query + * @index:P_Key table index to query + * @pkey:Returned P_Key + * + * ib_query_pkey() fetches the specified P_Key table entry. + */ +int ib_query_pkey(struct ib_device *device, + u8 port_num, u16 index, u16 *pkey) +{ + return device->query_pkey(device, port_num, index, pkey); +} + + +/** + * ib_modify_device - Change IB device attributes + * @device:Device to modify + * @device_modify_mask:Mask of attributes to change + * @device_modify:New attribute values + * + * ib_modify_device() changes a device's attributes as specified by + * the @device_modify_mask and @device_modify structure. + */ +int ib_modify_device(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify) +{ + return device->modify_device(device, device_modify_mask, + device_modify); +} + + +/** + * ib_modify_port - Modifies the attributes for the specified port. + * @device: The device to modify. + * @port_num: The number of the port to modify. + * @port_modify_mask: Mask used to specify which attributes of the port + * to change. + * @port_modify: New attribute values for the port. + * + * ib_modify_port() changes a port's attributes as specified by the + * @port_modify_mask and @port_modify structure. + */ +int ib_modify_port(struct ib_device *device, + u8 port_num, int port_modify_mask, + struct ib_port_modify *port_modify) +{ + return device->modify_port(device, port_num, port_modify_mask, + port_modify); +} + +int ib_core_init(void) +{ + int ret; + + /* leo: added because there is no static init of semaphore in Windows */ + KeInitializeMutex(&device_mutex,0); + + ret = ib_cache_setup(); + if (ret) { + printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); + } + + return ret; +} + +void ib_core_cleanup(void) +{ + ib_cache_cleanup(); +} + diff --git a/branches/MTHCA/hw/mthca/kernel/mt_l2w.c b/branches/MTHCA/hw/mthca/kernel/mt_l2w.c new file mode 100644 index 00000000..1c4d7e3b --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mt_l2w.c @@ -0,0 +1,150 @@ +#include + +u64 mt_ticks_per_sec; + +void MT_time_calibrate() +{ +#if defined(_WIN64) && (defined(IA64) || defined(_IA64_)) + /* Itanium */ + mt_ticks_per_sec = 10000000; + +#elif defined(_WIN64) && (defined(AMD64) || defined(_AMD64_)) + /* x64 */ + + LARGE_INTEGER a; + KeQueryPerformanceCounter( &a ); + mt_ticks_per_sec = a.QuadPart; + +#elif defined(_WIN32) && (defined(i386) || defined(_x86_)) + /* x86 */ + + LARGE_INTEGER a; + KeQueryPerformanceCounter( &a ); + mt_ticks_per_sec = a.QuadPart; + +#else + #error Unsupported platform +#endif + +} + +pci_pool_t * +pci_pool_create (const char *name, struct mthca_dev *mdev, + size_t size, size_t align, size_t allocation) +{ + pci_pool_t *pool; + UNREFERENCED_PARAMETER(align); + + MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + + // allocation parameter is not handled yet + ASSERT(allocation == 0); + + // allocate object + pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL ); + if (pool == NULL) + return NULL; + + //TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory, + // while default alloc function - ExAllocatePoolWithTag -doesn't. 
+ // But for now it is used for elements of size <= PAGE_SIZE + // Anyway - a sanity check: + ASSERT(size <= PAGE_SIZE); + if (size > PAGE_SIZE) + return NULL; + + //TODO: not too effective: one can read its own alloc/free functions + ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 ); + + // fill the object + pool->mdev = mdev; + pool->size = size; + strncpy( pool->name, name, sizeof pool->name ); + + return pool; +} + +// from lib/string.c +/** +* strlcpy - Copy a %NUL terminated string into a sized buffer +* @dest: Where to copy the string to +* @src: Where to copy the string from +* @size: size of destination buffer +* +* Compatible with *BSD: the result is always a valid +* NUL-terminated string that fits in the buffer (unless, +* of course, the buffer size is zero). It does not pad +* out the result like strncpy() does. +*/ +SIZE_T strlcpy(char *dest, const char *src, SIZE_T size) +{ + SIZE_T ret = strlen(src); + + if (size) { + SIZE_T len = (ret >= size) ? size-1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} + + +int __bitmap_full(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (~bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} + +int __bitmap_empty(const unsigned long *bitmap, int bits) +{ + int k, lim = bits/BITS_PER_LONG; + for (k = 0; k < lim; ++k) + if (bitmap[k]) + return 0; + + if (bits % BITS_PER_LONG) + if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) + return 0; + + return 1; +} + +int request_irq( + IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */ + IN KSPIN_LOCK *isr_lock, /* spin lock for ISR */ + IN PKSERVICE_ROUTINE isr, /* ISR */ + IN void *isr_ctx, /* ISR context */ + OUT PKINTERRUPT *int_obj /* interrupt object */ + ) +{ + NTSTATUS status; + + status = IoConnectInterrupt( + int_obj, /* InterruptObject */ + isr, /* ISR */ + isr_ctx, /* ISR context */ + isr_lock, /* spinlock */ + int_info->u.Interrupt.Vector, /* interrupt vector */ + (KIRQL)int_info->u.Interrupt.Level, /* IRQL */ + (KIRQL)int_info->u.Interrupt.Level, /* Synchronize IRQL */ + (int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? 
+			Latched : LevelSensitive,	/* interrupt type: LATCHED or LEVEL */
+		int_info->ShareDisposition == CmResourceShareShared,	/* vector shared or not */
+		(KAFFINITY)int_info->u.Interrupt.Affinity,	/* interrupt affinity */
+		FALSE	/* whether to save Float registers */
+		);
+
+	if (!NT_SUCCESS(status))
+		return -EFAULT;	/* failed to connect interrupt */
+	else
+		return 0;
+}
+
diff --git a/branches/MTHCA/hw/mthca/kernel/mt_memory.c b/branches/MTHCA/hw/mthca/kernel/mt_memory.c
new file mode 100644
index 00000000..7a08a793
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/kernel/mt_memory.c
@@ -0,0 +1,315 @@
+#include "hca_driver.h"
+#include "mthca_dev.h"
+
+
+void * alloc_pages(
+	IN struct mthca_dev *dev,
+	IN unsigned long cur_order,
+	OUT dma_addr_t *p_da)
+{
+	PHYSICAL_ADDRESS pa = {0};
+	DMA_ADAPTER * p_dma = dev->ext->p_dma_adapter;
+	void * va = p_dma->DmaOperations->AllocateCommonBuffer(
+		p_dma, PAGE_SIZE << cur_order, &pa, FALSE );
+	if (va == NULL)		/* don't zero a failed allocation */
+		return NULL;
+	RtlZeroMemory( va, PAGE_SIZE << cur_order );
+	*p_da = pa.QuadPart;
+	return va;
+}
+
+void free_pages(
+	IN struct mthca_dev *dev,
+	IN unsigned long cur_order,
+	IN void *va,
+	IN dma_addr_t da)
+{
+	PHYSICAL_ADDRESS pa = {0};
+	DMA_ADAPTER * p_dma = dev->ext->p_dma_adapter;
+	pa.QuadPart = da;
+	p_dma->DmaOperations->FreeCommonBuffer(
+		p_dma, PAGE_SIZE << cur_order, pa, va, FALSE );
+}
+
+/*
+* Function: map a user buffer to kernel space and lock it
+*
+* Return: 0 on success, negative error code on failure
+*/
+int get_user_pages(
+	IN	struct mthca_dev *dev,	/* device */
+	IN	u64 start,		/* address in user space */
+	IN	int npages,		/* size in pages */
+	IN	int write_access,	/* access rights */
+	OUT	void **pages,		/* mapped kernel address */
+	OUT	PMDL *p_mdl		/* MDL */
+	)
+{
+	PMDL mdl_p;
+	int size = npages * PAGE_SIZE;	/* 'npages' is a page count, not an order */
+	int access = (write_access) ? IoWriteAccess : IoReadAccess;
+	int err;
+	void * kva;	/* kernel virtual address */
+
+	ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+	/* allocate MDL */
+	mdl_p = IoAllocateMdl( (PVOID)start, (ULONG)size,
+		FALSE,
+		FALSE,	/* don't charge quota */
+		NULL);
+	if (mdl_p == NULL) {
+		err = -ENOMEM;
+		goto err0;
+	}
+
+	/* lock memory */
+	__try {
+		MmProbeAndLockPages( mdl_p, UserMode, access );
+	}
+	__except (EXCEPTION_EXECUTE_HANDLER)
+	{
+		NTSTATUS Status = GetExceptionCode();
+		printk(KERN_ERROR "Exception 0x%x on MmProbeAndLockPages(), addr 0x%I64x, size %d\n", Status, start, size);
+		err = -EACCES;
+		goto err1;
+	}
+
+	/* map it to kernel */
+	kva = MmMapLockedPagesSpecifyCache( mdl_p,
+		KernelMode, MmNonCached,
+		NULL, FALSE, NormalPagePriority );
+	if (kva == NULL) {
+		printk(KERN_ERROR "MmMapLockedPagesSpecifyCache failed\n");
+		err = -EFAULT;	/* mapping failed */
+		goto err2;
+	}
+
+	*pages = kva;
+	*p_mdl = mdl_p;
+	return 0;
+
+err2:
+	MmUnlockPages(mdl_p);
+err1:
+	IoFreeMdl(mdl_p);
+err0:
+	return err;
+}
+
+void put_page(struct scatterlist *sg)
+{
+	if (sg->p_mdl) {
+		MmUnmapLockedPages( sg->page, sg->p_mdl );
+		MmUnlockPages(sg->p_mdl);
+		IoFreeMdl(sg->p_mdl);
+	}
+}
+
+void* alloc_dma_mem(
+	IN struct mthca_dev *dev,
+	IN unsigned long size,
+	OUT dma_addr_t *p_da)
+{
+	PHYSICAL_ADDRESS pa = {0};
+	DMA_ADAPTER * p_dma = dev->ext->p_dma_adapter;
+	void * va = p_dma->DmaOperations->AllocateCommonBuffer(
+		p_dma, size, &pa, FALSE );
+	*p_da = pa.QuadPart;
+	return va;
+}
+
+void free_dma_mem(
+	IN struct mthca_dev *dev,
+	IN unsigned long size,
+	IN void *va,
+	IN dma_addr_t da)
+{
+	PHYSICAL_ADDRESS pa = {0};
+	DMA_ADAPTER * p_dma = dev->ext->p_dma_adapter;
+	pa.QuadPart = da;
+	p_dma->DmaOperations->FreeCommonBuffer(
+		p_dma, size, pa, va, FALSE );
+}
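For orientation, this is how the two helpers above are meant to be paired by a caller. An illustrative sketch only: uva, n_pages and sg are hypothetical names, not identifiers from this patch.

	void *kva;
	PMDL p_mdl;
	struct scatterlist sg;

	if (!get_user_pages(dev, uva, n_pages, 1 /* write access */, &kva, &p_mdl)) {
		/* ... DMA to/from the locked buffer through kva ... */
		sg.page = kva;		/* put_page() takes the mapping and the MDL from here */
		sg.p_mdl = p_mdl;
		put_page(&sg);		/* unmap, unlock and free the MDL */
	}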
+typedef struct _mt_iobuf_seg {
+	LIST_ENTRY	link;
+	PMDL	mdl_p;
+	u64	va;		/* virtual address of the buffer */
+	u64	size;		/* size in bytes of the buffer */
+	u32	nr_pages;
+	int	is_user;
+} mt_iobuf_seg_t;
+
+static int register_segment(
+	IN		u64 va,
+	IN		u64 size,
+	IN		int is_user,
+	IN		u32 acc,
+	IN OUT	mt_iobuf_t * iobuf_p)
+{
+	PMDL mdl_p;
+	int rc;
+	KPROCESSOR_MODE mode;
+	mt_iobuf_seg_t * new_iobuf;
+	LOCK_OPERATION Operation;
+
+	// set Operation
+	if (acc & IB_ACCESS_LOCAL_WRITE)
+		Operation = IoModifyAccess;
+	else
+		Operation = IoReadAccess;
+
+	// allocate IOBUF segment object
+	new_iobuf = (mt_iobuf_seg_t *)kmalloc(sizeof(mt_iobuf_seg_t), GFP_KERNEL );
+	if (new_iobuf == NULL) {
+		rc = -ENOMEM;
+		goto err_nomem;
+	}
+
+	// allocate MDL
+	mdl_p = IoAllocateMdl( (PVOID)va, (ULONG)size, FALSE,FALSE,NULL);
+	if (mdl_p == NULL) {
+		rc = -ENOMEM;
+		goto err_alloc_mdl;
+	}
+
+	// make context-dependent things
+	if (is_user) {
+		ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+		mode = UserMode;
+	}
+	else {	/* Mapping to kernel virtual address */
+		//	MmBuildMdlForNonPagedPool(mdl_p);	// fill MDL ??? - should we do that really ?
+		mode = KernelMode;
+	}
+
+	__try { /* try */
+		MmProbeAndLockPages( mdl_p, mode, Operation );	/* lock memory */
+	} /* try */
+
+	__except (EXCEPTION_EXECUTE_HANDLER)	{
+		NTSTATUS Status = GetExceptionCode();
+		HCA_TRACE( HCA_DBG_ERROR,
+			("register_segment: Exception 0x%x on MmProbeAndLockPages(), va 0x%I64x, size %I64d\n",
+			Status, va, size));
+		rc = -EACCES;
+		goto err_probe;
+	}
+
+	// fill IOBUF object
+	new_iobuf->va = va;
+	new_iobuf->size = size;
+	new_iobuf->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+	new_iobuf->mdl_p = mdl_p;
+	new_iobuf->is_user = is_user;
+	InsertTailList( &iobuf_p->seg_que, &new_iobuf->link );
+	return 0;
+
+err_probe:
+	IoFreeMdl(mdl_p);
+err_alloc_mdl:
+	ExFreePool((PVOID)new_iobuf);
+err_nomem:
+	return rc;
+}
+
+int iobuf_register(
+	IN		u64 va,
+	IN		u64 size,
+	IN		int is_user,
+	IN		int acc,
+	IN OUT	mt_iobuf_t *iobuf_p)
+{
+	int rc = 0;		// also covers the degenerate size == 0 case
+	u64 seg_va = va;	// current segment start
+	u64 seg_size = size;	// current segment size
+	u64 rdc = size;		// remaining data counter - what is left to lock
+	u64 delta;		// the size of the last, partially filled page of the first segment
+	mt_iobuf_seg_t * iobuf_seg_p;	// pointer to current segment object
+	unsigned page_size = PAGE_SIZE;
+
+// 32 - safety margin
+#define PFNS_IN_PAGE_SIZE_MDL		((PAGE_SIZE - sizeof(struct _MDL) - 32) / sizeof(long))
+#define MIN_IOBUF_SEGMENT_SIZE	(PAGE_SIZE * PFNS_IN_PAGE_SIZE_MDL)	// 4MB
+
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// init IOBUF object
+	InitializeListHead( &iobuf_p->seg_que );
+	iobuf_p->seg_num = 0;
+
+	// allocate segments
+	while (rdc > 0) {
+		// map a segment
+		rc = register_segment(seg_va, seg_size, is_user, acc, iobuf_p );
+
+		// success - move to another segment
+		if (!rc) {
+			rdc -= seg_size;
+			seg_va += seg_size;
+			iobuf_p->seg_num++;
+			if (seg_size > rdc)
+				seg_size = rdc;
+			continue;
+		}
+
+		// failure - too large a buffer: shrink it and try once more
+		if (rc == -ENOMEM) {
+			// nowhere to shrink - not enough memory
+			if (seg_size <= MIN_IOBUF_SEGMENT_SIZE)
+				break;
+			// halve the size
+			seg_size >>= 1;
+			// round the segment end up to a page boundary (only for the first segment)
+			if (iobuf_p->seg_num == 0) {
+				delta = (seg_va + seg_size) & (page_size - 1);
+				seg_size -= delta;
+				seg_size += page_size;
+				if (seg_size > rdc)
+					seg_size = rdc;
+			}
+			continue;
+		}
+
+		// got unrecoverable error
+		break;
+	}
+
+	// on failure - release the segments registered so far; on success - fill the IOBUF object
+	if (rc)
+		iobuf_deregister( iobuf_p );
+	else {
+		iobuf_p->va = va;
+		iobuf_p->size = size;
+		iobuf_p->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+		iobuf_p->is_user = is_user;
+	}
+
+	return rc;
+}
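Likewise, a hypothetical caller of iobuf_register()/iobuf_deregister() might look like this (sketch only; uva and len are made-up names):

	mt_iobuf_t iobuf;
	int rc;

	rc = iobuf_register(uva, len, 1 /* user mode */, IB_ACCESS_LOCAL_WRITE, &iobuf);
	if (!rc) {
		/* ... walk iobuf.seg_que and feed the locked pages to the HCA ... */
		iobuf_deregister(&iobuf);
	}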
+
+
+static void deregister_segment(mt_iobuf_seg_t * iobuf_seg_p)
+{
+	MmUnlockPages( iobuf_seg_p->mdl_p );	// unlock the buffer
+	IoFreeMdl( iobuf_seg_p->mdl_p );	// free MDL
+	ExFreePool(iobuf_seg_p);
+}
+
+void iobuf_deregister(mt_iobuf_t *iobuf_p)
+{
+	mt_iobuf_seg_t *iobuf_seg_p;	// pointer to current segment object
+
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// release segments
+	while (!IsListEmpty( &iobuf_p->seg_que )) {
+		iobuf_seg_p = (mt_iobuf_seg_t *)(PVOID)RemoveTailList( &iobuf_p->seg_que );
+		deregister_segment(iobuf_seg_p);
+		iobuf_p->seg_num--;
+	}
+	ASSERT(iobuf_p->seg_num == 0);
+}
+
diff --git a/branches/MTHCA/hw/mthca/kernel/mt_packer.c b/branches/MTHCA/hw/mthca/kernel/mt_packer.c
new file mode 100644
index 00000000..66a9f674
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/kernel/mt_packer.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: packer.c 2730 2005-06-28 16:43:03Z sean.hefty $
+ */
+
+#include <ib_pack.h>
+
+static u64 value_read(int offset, int size, u8 *structure)
+{
+	switch (size) {
+	case 1: return *(u8 *) (structure + offset);
+	case 2: return be16_to_cpup((__be16 *) (structure + offset));
+	case 4: return be32_to_cpup((__be32 *) (structure + offset));
+	case 8: return be64_to_cpup((__be64 *) (structure + offset));
+	default:
+		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+		return 0;
+	}
+}
+
+/**
+ * ib_pack - Pack a structure into a buffer
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @structure:Structure to pack from
+ * @buf:Buffer to pack into
+ *
+ * ib_pack() packs a list of structure fields into a buffer,
+ * controlled by the array of fields in @desc.
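+ *
+ * For example (a sketch: lrh_table is the LRH layout defined in
+ * mt_ud_header.c and 'lrh' a hypothetical local, filled in beforehand):
+ *
+ *	struct ib_unpacked_lrh lrh;
+ *	ib_pack(lrh_table, ARRAY_SIZE(lrh_table), (u8 *) &lrh, buf);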
+ */
+void ib_pack(const struct ib_field *desc,
+	     int                    desc_len,
+	     u8                     *structure,
+	     u8                     *buf)
+{
+	int i;
+
+	for (i = 0; i < desc_len; ++i) {
+		if (desc[i].size_bits <= 32) {
+			int shift;
+			u32 val;
+			__be32 mask;
+			__be32 *addr;
+
+			shift = 32 - desc[i].offset_bits - desc[i].size_bits;
+			if (desc[i].struct_size_bytes)
+				val = (u32)value_read(desc[i].struct_offset_bytes,
+						desc[i].struct_size_bytes,
+						structure) << shift;
+			else
+				val = 0;
+
+			mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift);
+			addr = (__be32 *) buf + desc[i].offset_words;
+			*addr = (*addr & ~mask) | (cpu_to_be32(val) & mask);
+		} else if (desc[i].size_bits <= 64) {
+			int shift;
+			u64 val;
+			__be64 mask;
+			__be64 *addr;
+
+			shift = 64 - desc[i].offset_bits - desc[i].size_bits;
+			if (desc[i].struct_size_bytes)
+				val = value_read(desc[i].struct_offset_bytes,
+						desc[i].struct_size_bytes,
+						structure) << shift;
+			else
+				val = 0;
+
+			mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift);
+			addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words);
+			*addr = (*addr & ~mask) | (cpu_to_be64(val) & mask);
+		} else {
+			if (desc[i].offset_bits % 8 ||
+			    desc[i].size_bits % 8) {
+				printk(KERN_WARNING "Structure field %s of size %d "
+					"bits is not byte-aligned\n",
+					desc[i].field_name, desc[i].size_bits);
+			}
+
+			if (desc[i].struct_size_bytes)
+				memcpy(buf + desc[i].offset_words * 4 +
+					desc[i].offset_bits / 8,
+					structure + desc[i].struct_offset_bytes,
+					desc[i].size_bits / 8);
+			else
+				RtlZeroMemory(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8,
+					desc[i].size_bits / 8);
+		}
+	}
+}
+EXPORT_SYMBOL(ib_pack);
+
+static void value_write(int offset, int size, u64 val, u8 *structure)
+{
+	switch (size * 8) {
+	case 8:  *(    u8 *) (structure + offset) = (u8)val;          break;
+	case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break;
+	case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
+	case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
+	default:
+		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+	}
+}
+
+/**
+ * ib_unpack - Unpack a buffer into a structure
+ * @desc:Array of structure field descriptions
+ * @desc_len:Number of entries in @desc
+ * @buf:Buffer to unpack from
+ * @structure:Structure to unpack into
+ *
+ * ib_unpack() unpacks a list of structure fields from a buffer,
+ * controlled by the array of fields in @desc.
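+ *
+ * Paired with ib_pack() this round-trips a header (sketch, names as in
+ * the ib_pack() example above):
+ *
+ *	ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, (u8 *) &lrh);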
+ */ +void ib_unpack(const struct ib_field *desc, + int desc_len, + u8 *buf, + u8 *structure) +{ + int i; + + for (i = 0; i < desc_len; ++i) { + if (!desc[i].struct_size_bytes) + continue; + + if (desc[i].size_bits <= 32) { + int shift; + u32 val; + u32 mask; + __be32 *addr; + + shift = 32 - desc[i].offset_bits - desc[i].size_bits; + mask = ((1ull << desc[i].size_bits) - 1) << shift; + addr = (__be32 *) buf + desc[i].offset_words; + val = (be32_to_cpup(addr) & mask) >> shift; + value_write(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + val, + structure); + } else if (desc[i].size_bits <= 64) { + int shift; + u64 val; + u64 mask; + __be64 *addr; + + shift = 64 - desc[i].offset_bits - desc[i].size_bits; + mask = (~0ull >> (64 - desc[i].size_bits)) << shift; + addr = (__be64 *) buf + desc[i].offset_words; + val = (be64_to_cpup(addr) & mask) >> shift; + value_write(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + val, + structure); + } else { + if (desc[i].offset_bits % 8 || + desc[i].size_bits % 8) { + printk(KERN_WARNING "Structure field %s of size %d " + "bits is not byte-aligned\n", + desc[i].field_name, desc[i].size_bits); + } + + memcpy(structure + desc[i].struct_offset_bytes, + buf + desc[i].offset_words * 4 + + desc[i].offset_bits / 8, + desc[i].size_bits / 8); + } + } +} +EXPORT_SYMBOL(ib_unpack); diff --git a/branches/MTHCA/hw/mthca/kernel/mt_ud_header.c b/branches/MTHCA/hw/mthca/kernel/mt_ud_header.c new file mode 100644 index 00000000..d0f8432b --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mt_ud_header.c @@ -0,0 +1,448 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: ud_header.c 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#include + +#include + +#ifdef LINUX_TO_BE_REMOVED + +#define STRUCT_FIELD(header, field) \ + .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ + .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \ + .field_name = #header ":" #field + +#else +#define STRUCT_FIELD_INIT(header, field,ow,ob,sb) \ + offsetof(struct ib_unpacked_ ## header, field), \ + sizeof ((struct ib_unpacked_ ## header *) 0)->field, \ + ow,ob,sb, \ + #header ":" #field + +#define STRUCT_FIELD_INITR(ow,ob,sb) \ + 0, 0, ow, ob, sb, "reserved" +#endif + + +#ifdef LINUX_TO_BE_REMOVED + +static const struct ib_field lrh_table[] = { + { STRUCT_FIELD(lrh, virtual_lane), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 4 }, + { STRUCT_FIELD(lrh, link_version), + .offset_words = 0, + .offset_bits = 4, + .size_bits = 4 }, + { STRUCT_FIELD(lrh, service_level), + .offset_words = 0, + .offset_bits = 8, + .size_bits = 4 }, + { RESERVED, + .offset_words = 0, + .offset_bits = 12, + .size_bits = 2 }, + { STRUCT_FIELD(lrh, link_next_header), + .offset_words = 0, + .offset_bits = 14, + .size_bits = 2 }, + { STRUCT_FIELD(lrh, destination_lid), + .offset_words = 0, + .offset_bits = 16, + .size_bits = 16 }, + { RESERVED, + .offset_words = 1, + .offset_bits = 0, + .size_bits = 5 }, + { STRUCT_FIELD(lrh, packet_length), + .offset_words = 1, + .offset_bits = 5, + .size_bits = 11 }, + { STRUCT_FIELD(lrh, source_lid), + .offset_words = 1, + .offset_bits = 16, + .size_bits = 16 } +}; + +#else + +static const struct ib_field lrh_table[] = { + { STRUCT_FIELD_INIT(lrh, virtual_lane, 0, 0, 4) }, + { STRUCT_FIELD_INIT(lrh, link_version, 0, 4, 4) }, + { STRUCT_FIELD_INIT(lrh, service_level, 0, 8, 4) }, + { STRUCT_FIELD_INITR(0,12,2) }, + { STRUCT_FIELD_INIT(lrh, link_next_header, 0, 14, 2) }, + { STRUCT_FIELD_INIT(lrh, destination_lid, 0, 16, 16) }, + { STRUCT_FIELD_INITR(1,0,5) }, + { STRUCT_FIELD_INIT(lrh, packet_length, 1, 5, 11) }, + { STRUCT_FIELD_INIT(lrh, source_lid, 1, 16, 16) } +}; + +#endif + +#ifdef LINUX_TO_BE_REMOVED + +static const struct ib_field grh_table[] = { + { STRUCT_FIELD(grh, ip_version), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 4 }, + { STRUCT_FIELD(grh, traffic_class), + .offset_words = 0, + .offset_bits = 4, + .size_bits = 8 }, + { STRUCT_FIELD(grh, flow_label), + .offset_words = 0, + .offset_bits = 12, + .size_bits = 20 }, + { STRUCT_FIELD(grh, payload_length), + .offset_words = 1, + .offset_bits = 0, + .size_bits = 16 }, + { STRUCT_FIELD(grh, next_header), + .offset_words = 1, + .offset_bits = 16, + .size_bits = 8 }, + { STRUCT_FIELD(grh, hop_limit), + .offset_words = 1, + .offset_bits = 24, + .size_bits = 8 }, + { STRUCT_FIELD(grh, source_gid), + .offset_words = 2, + .offset_bits = 0, + .size_bits = 128 }, + { STRUCT_FIELD(grh, destination_gid), + .offset_words = 6, + .offset_bits = 0, + .size_bits = 128 } +}; + +#else + +static const struct ib_field grh_table[] = { + { STRUCT_FIELD_INIT(grh, ip_version, 0, 0, 4) }, + { STRUCT_FIELD_INIT(grh, traffic_class, 0, 4, 8) }, + { STRUCT_FIELD_INIT(grh, flow_label, 0, 12, 20) }, + { STRUCT_FIELD_INIT(grh, payload_length, 1, 0, 16) }, + { STRUCT_FIELD_INIT(grh, next_header, 1, 16, 8) }, + { STRUCT_FIELD_INIT(grh, hop_limit, 1, 24, 8) }, + { STRUCT_FIELD_INIT(grh, source_gid, 2, 0, 128) }, + { STRUCT_FIELD_INIT(grh, destination_gid, 6, 0, 128) } +}; +#endif + + +#ifdef LINUX_TO_BE_REMOVED + +static const struct ib_field bth_table[] = { + { 
STRUCT_FIELD(bth, opcode), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 8 }, + { STRUCT_FIELD(bth, solicited_event), + .offset_words = 0, + .offset_bits = 8, + .size_bits = 1 }, + { STRUCT_FIELD(bth, mig_req), + .offset_words = 0, + .offset_bits = 9, + .size_bits = 1 }, + { STRUCT_FIELD(bth, pad_count), + .offset_words = 0, + .offset_bits = 10, + .size_bits = 2 }, + { STRUCT_FIELD(bth, transport_header_version), + .offset_words = 0, + .offset_bits = 12, + .size_bits = 4 }, + { STRUCT_FIELD(bth, pkey), + .offset_words = 0, + .offset_bits = 16, + .size_bits = 16 }, + { RESERVED, + .offset_words = 1, + .offset_bits = 0, + .size_bits = 8 }, + { STRUCT_FIELD(bth, destination_qpn), + .offset_words = 1, + .offset_bits = 8, + .size_bits = 24 }, + { STRUCT_FIELD(bth, ack_req), + .offset_words = 2, + .offset_bits = 0, + .size_bits = 1 }, + { RESERVED, + .offset_words = 2, + .offset_bits = 1, + .size_bits = 7 }, + { STRUCT_FIELD(bth, psn), + .offset_words = 2, + .offset_bits = 8, + .size_bits = 24 } +}; + +#else + +static const struct ib_field bth_table[] = { + { STRUCT_FIELD_INIT(bth, opcode, 0, 0, 8) }, + { STRUCT_FIELD_INIT(bth, solicited_event, 0, 8, 1) }, + { STRUCT_FIELD_INIT(bth, mig_req, 0, 9, 1) }, + { STRUCT_FIELD_INIT(bth, pad_count, 0, 10, 2) }, + { STRUCT_FIELD_INIT(bth, transport_header_version, 0, 12, 4) }, + { STRUCT_FIELD_INIT(bth, pkey, 0, 16, 16) }, + { STRUCT_FIELD_INITR(1,0,8) }, + { STRUCT_FIELD_INIT(bth, destination_qpn, 1, 8, 24) }, + { STRUCT_FIELD_INIT(bth, ack_req, 2, 0, 1) }, + { STRUCT_FIELD_INITR(2,1,7) }, + { STRUCT_FIELD_INIT(bth, psn, 2, 8, 24) } +}; + +#endif + +#ifdef LINUX_TO_BE_REMOVED + +static const struct ib_field deth_table[] = { + { STRUCT_FIELD(deth, qkey), + .offset_words = 0, + .offset_bits = 0, + .size_bits = 32 }, + { RESERVED, + .offset_words = 1, + .offset_bits = 0, + .size_bits = 8 }, + { STRUCT_FIELD(deth, source_qpn), + .offset_words = 1, + .offset_bits = 8, + .size_bits = 24 } +}; + +#else + +static const struct ib_field deth_table[] = { + { STRUCT_FIELD_INIT(deth, qkey, 0, 0, 32) }, + { STRUCT_FIELD_INITR(1,0,8) }, + { STRUCT_FIELD_INIT(deth, source_qpn, 1, 8, 24) } +}; + +#endif + + +/** + * ib_ud_header_init - Initialize UD header structure + * @payload_bytes:Length of packet payload + * @grh_present:GRH flag (if non-zero, GRH will be included) + * @header:Structure to initialize + * + * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header, + * lrh.packet_length, grh.ip_version, grh.payload_length, + * grh.next_header, bth.opcode, bth.pad_count and + * bth.transport_header_version fields of a &struct ib_ud_header given + * the payload length and whether a GRH will be included. + */ +void ib_ud_header_init(int payload_bytes, + int grh_present, + struct ib_ud_header *header) +{ + int header_len; + u16 packet_length; + + RtlZeroMemory(header, sizeof *header); + + header_len = + IB_LRH_BYTES + + IB_BTH_BYTES + + IB_DETH_BYTES; + if (grh_present) { + header_len += IB_GRH_BYTES; + } + + header->lrh.link_version = 0; + header->lrh.link_next_header = + grh_present ? 
IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+	packet_length = (IB_LRH_BYTES +
+			 IB_BTH_BYTES +
+			 IB_DETH_BYTES +
+			 payload_bytes +
+			 4 + /* ICRC */
+			 3) / 4;	/* round up */
+
+	header->grh_present = grh_present;
+	if (grh_present) {
+		packet_length += IB_GRH_BYTES / 4;
+		header->grh.ip_version = 6;
+		header->grh.payload_length =
+			cpu_to_be16((IB_BTH_BYTES +
+				     IB_DETH_BYTES +
+				     payload_bytes +
+				     4 + /* ICRC */
+				     3) & ~3);	/* round up */
+		header->grh.next_header = 0x1b;
+	}
+
+	header->lrh.packet_length = cpu_to_be16(packet_length);
+
+	if (header->immediate_present)
+		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+	else
+		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+	header->bth.pad_count = (4 - payload_bytes) & 3;
+	header->bth.transport_header_version = 0;
+}
+EXPORT_SYMBOL(ib_ud_header_init);
+
+/**
+ * ib_ud_header_pack - Pack UD header struct into wire format
+ * @header:UD header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_ud_header_pack() packs the UD header structure @header into wire
+ * format in the buffer @buf.
+ */
+int ib_ud_header_pack(struct ib_ud_header *header,
+		      u8 *buf)
+{
+	int len = 0;
+
+	ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+		&header->lrh, buf);
+	len += IB_LRH_BYTES;
+
+	if (header->grh_present) {
+		ib_pack(grh_table, ARRAY_SIZE(grh_table),
+			&header->grh, buf + len);
+		len += IB_GRH_BYTES;
+	}
+
+	ib_pack(bth_table, ARRAY_SIZE(bth_table),
+		&header->bth, buf + len);
+	len += IB_BTH_BYTES;
+
+	ib_pack(deth_table, ARRAY_SIZE(deth_table),
+		&header->deth, buf + len);
+	len += IB_DETH_BYTES;
+
+	if (header->immediate_present) {
+		memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data);
+		len += sizeof header->immediate_data;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL(ib_ud_header_pack);
+
+/**
+ * ib_ud_header_unpack - Unpack UD header struct from wire format
+ * @header:UD header struct
+ * @buf:Buffer to unpack from
+ *
+ * ib_ud_header_unpack() unpacks the UD header structure @header from the
+ * wire-format buffer @buf.
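+ *
+ * A rough round-trip sketch (illustrative only: wire and payload_len are
+ * hypothetical caller variables, with wire sized for LRH + GRH + BTH +
+ * DETH):
+ *
+ *	struct ib_ud_header hdr;
+ *	int len, err;
+ *
+ *	ib_ud_header_init(payload_len, 1, &hdr);
+ *	len = ib_ud_header_pack(&hdr, wire);
+ *	err = ib_ud_header_unpack(wire, &hdr);
+ *
+ * A non-zero err means the wire header was malformed (bad version,
+ * next_header or opcode field).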
+ */ +int ib_ud_header_unpack(u8 *buf, + struct ib_ud_header *header) +{ + ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), + buf, &header->lrh); + buf += IB_LRH_BYTES; + + if (header->lrh.link_version != 0) { + printk(KERN_WARNING "Invalid LRH.link_version %d\n", + header->lrh.link_version); + return -EINVAL; + } + + switch (header->lrh.link_next_header) { + case IB_LNH_IBA_LOCAL: + header->grh_present = 0; + break; + + case IB_LNH_IBA_GLOBAL: + header->grh_present = 1; + ib_unpack(grh_table, ARRAY_SIZE(grh_table), + buf, &header->grh); + buf += IB_GRH_BYTES; + + if (header->grh.ip_version != 6) { + printk(KERN_WARNING "Invalid GRH.ip_version %d\n", + header->grh.ip_version); + return -EINVAL; + } + if (header->grh.next_header != 0x1b) { + printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n", + header->grh.next_header); + return -EINVAL; + } + break; + + default: + printk(KERN_WARNING "Invalid LRH.link_next_header %d\n", + header->lrh.link_next_header); + return -EINVAL; + } + + ib_unpack(bth_table, ARRAY_SIZE(bth_table), + buf, &header->bth); + buf += IB_BTH_BYTES; + + switch (header->bth.opcode) { + case IB_OPCODE_UD_SEND_ONLY: + header->immediate_present = 0; + break; + case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE: + header->immediate_present = 1; + break; + default: + printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n", + header->bth.opcode); + return -EINVAL; + } + + if (header->bth.transport_header_version != 0) { + printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n", + header->bth.transport_header_version); + return -EINVAL; + } + + ib_unpack(deth_table, ARRAY_SIZE(deth_table), + buf, &header->deth); + buf += IB_DETH_BYTES; + + if (header->immediate_present) + memcpy(&header->immediate_data, buf, sizeof header->immediate_data); + + return 0; +} +EXPORT_SYMBOL(ib_ud_header_unpack); diff --git a/branches/MTHCA/hw/mthca/kernel/mt_verbs.c b/branches/MTHCA/hw/mthca/kernel/mt_verbs.c new file mode 100644 index 00000000..da54c0a6 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mt_verbs.c @@ -0,0 +1,535 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: verbs.c 2934 2005-07-29 17:31:49Z roland $ + */ + +#include +#include + +/* Protection domains */ + +struct ib_pd *ib_alloc_pd(struct ib_device *device) +{ + struct ib_pd *pd; + + pd = device->alloc_pd(device, NULL, NULL); + + if (!IS_ERR(pd)) { + pd->device = device; + pd->uobject = NULL; + atomic_set(&pd->usecnt, 0); + } + + return pd; +} +EXPORT_SYMBOL(ib_alloc_pd); + +int ib_dealloc_pd(struct ib_pd *pd) +{ + if (atomic_read(&pd->usecnt)) + return -EBUSY; + + return pd->device->dealloc_pd(pd); +} +EXPORT_SYMBOL(ib_dealloc_pd); + +/* Address handles */ + +struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + struct ib_ah *ah; + + ah = pd->device->create_ah(pd, ah_attr); + + if (!IS_ERR(ah)) { + ah->device = pd->device; + ah->pd = pd; + ah->uobject = NULL; + atomic_inc(&pd->usecnt); + } + + return ah; +} +EXPORT_SYMBOL(ib_create_ah); + +struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, + struct ib_grh *grh, u8 port_num) +{ + struct ib_ah_attr ah_attr; + u32 flow_class; + u16 gid_index; + int ret; + + memset(&ah_attr, 0, sizeof ah_attr); + ah_attr.dlid = wc->slid; + ah_attr.sl = wc->sl; + ah_attr.src_path_bits = wc->dlid_path_bits; + ah_attr.port_num = port_num; + + if (wc->wc_flags & IB_WC_GRH) { + ah_attr.ah_flags = IB_AH_GRH; + ah_attr.grh.dgid = grh->dgid; + + ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num, + &gid_index); + if (ret) + return ERR_PTR(ret); + + ah_attr.grh.sgid_index = (u8) gid_index; + flow_class = be32_to_cpu(grh->version_tclass_flow); + ah_attr.grh.flow_label = flow_class & 0xFFFFF; + ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF; + ah_attr.grh.hop_limit = grh->hop_limit; + } + + return ib_create_ah(pd, &ah_attr); +} +EXPORT_SYMBOL(ib_create_ah_from_wc); + +int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + return ah->device->modify_ah ? + ah->device->modify_ah(ah, ah_attr) : + -ENOSYS; +} +EXPORT_SYMBOL(ib_modify_ah); + +int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + return ah->device->query_ah ? 
+ ah->device->query_ah(ah, ah_attr) : + -ENOSYS; +} +EXPORT_SYMBOL(ib_query_ah); + +int ib_destroy_ah(struct ib_ah *ah) +{ + struct ib_pd *pd; + int ret; + + pd = ah->pd; + ret = ah->device->destroy_ah(ah); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_destroy_ah); + +/* Shared receive queues */ + +struct ib_srq *ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr) +{ + struct ib_srq *srq; + + if (!pd->device->create_srq) + return ERR_PTR(-ENOSYS); + + srq = pd->device->create_srq(pd, srq_init_attr, NULL); + + if (!IS_ERR(srq)) { + srq->device = pd->device; + srq->pd = pd; + srq->uobject = NULL; + srq->event_handler = srq_init_attr->event_handler; + srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&srq->usecnt, 0); + } + + return srq; +} +EXPORT_SYMBOL(ib_create_srq); + +int ib_modify_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask) +{ + return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); +} +EXPORT_SYMBOL(ib_modify_srq); + +int ib_query_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr) +{ + return srq->device->query_srq ? + srq->device->query_srq(srq, srq_attr) : -ENOSYS; +} +EXPORT_SYMBOL(ib_query_srq); + +int ib_destroy_srq(struct ib_srq *srq) +{ + struct ib_pd *pd; + int ret; + + if (atomic_read(&srq->usecnt)) + return -EBUSY; + + pd = srq->pd; + + ret = srq->device->destroy_srq(srq); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_destroy_srq); + +/* Queue pairs */ + +struct ib_qp *ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr) +{ + struct ib_qp *qp; + + qp = pd->device->create_qp(pd, qp_init_attr, NULL); + + if (!IS_ERR(qp)) { + qp->device = pd->device; + qp->pd = pd; + qp->send_cq = qp_init_attr->send_cq; + qp->recv_cq = qp_init_attr->recv_cq; + qp->srq = qp_init_attr->srq; + qp->uobject = NULL; + qp->event_handler = qp_init_attr->event_handler; + qp->qp_context = qp_init_attr->qp_context; + qp->qp_type = qp_init_attr->qp_type; + atomic_inc(&pd->usecnt); + atomic_inc(&qp_init_attr->send_cq->usecnt); + atomic_inc(&qp_init_attr->recv_cq->usecnt); + if (qp_init_attr->srq) + atomic_inc(&qp_init_attr->srq->usecnt); + } + + return qp; +} +EXPORT_SYMBOL(ib_create_qp); + +int ib_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); +} +EXPORT_SYMBOL(ib_modify_qp); + +int ib_query_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + return qp->device->query_qp ? 
+ qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : + -ENOSYS; +} +EXPORT_SYMBOL(ib_query_qp); + +int ib_destroy_qp(struct ib_qp *qp) +{ + struct ib_pd *pd; + struct ib_cq *scq, *rcq; + struct ib_srq *srq; + int ret; + + pd = qp->pd; + scq = qp->send_cq; + rcq = qp->recv_cq; + srq = qp->srq; + + ret = qp->device->destroy_qp(qp); + if (!ret) { + atomic_dec(&pd->usecnt); + atomic_dec(&scq->usecnt); + atomic_dec(&rcq->usecnt); + if (srq) + atomic_dec(&srq->usecnt); + } + + return ret; +} +EXPORT_SYMBOL(ib_destroy_qp); + +/* Completion queues */ + +struct ib_cq *ib_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, int cqe) +{ + struct ib_cq *cq; + + cq = device->create_cq(device, cqe, NULL, NULL); + + if (!IS_ERR(cq)) { + cq->device = device; + cq->uobject = NULL; + cq->comp_handler = comp_handler; + cq->event_handler = event_handler; + cq->cq_context = cq_context; + atomic_set(&cq->usecnt, 0); + } + + return cq; +} +EXPORT_SYMBOL(ib_create_cq); + +int ib_destroy_cq(struct ib_cq *cq) +{ + if (atomic_read(&cq->usecnt)) + return -EBUSY; + + return cq->device->destroy_cq(cq); +} +EXPORT_SYMBOL(ib_destroy_cq); + +int ib_resize_cq(struct ib_cq *cq, + int cqe) +{ + int ret; + + if (!cq->device->resize_cq) + return -ENOSYS; + + ret = cq->device->resize_cq(cq, &cqe); + if (!ret) + cq->cqe = cqe; + + return ret; +} +EXPORT_SYMBOL(ib_resize_cq); + +/* Memory regions */ + +struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags) +{ + struct ib_mr *mr; + + mr = pd->device->get_dma_mr(pd, mr_access_flags); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + mr->uobject = NULL; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + } + + return mr; +} +EXPORT_SYMBOL(ib_get_dma_mr); + +struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start) +{ + struct ib_mr *mr; + + mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, + mr_access_flags, iova_start); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + mr->uobject = NULL; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + } + + return mr; +} +EXPORT_SYMBOL(ib_reg_phys_mr); + +int ib_rereg_phys_mr(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start) +{ + struct ib_pd *old_pd; + int ret; + + if (!mr->device->rereg_phys_mr) + return -ENOSYS; + + if (atomic_read(&mr->usecnt)) + return -EBUSY; + + old_pd = mr->pd; + + ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, + phys_buf_array, num_phys_buf, + mr_access_flags, iova_start); + + if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { + atomic_dec(&old_pd->usecnt); + atomic_inc(&pd->usecnt); + } + + return ret; +} +EXPORT_SYMBOL(ib_rereg_phys_mr); + +int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) +{ + return mr->device->query_mr ? 
+ mr->device->query_mr(mr, mr_attr) : -ENOSYS; +} +EXPORT_SYMBOL(ib_query_mr); + +int ib_dereg_mr(struct ib_mr *mr) +{ + struct ib_pd *pd; + int ret; + + if (atomic_read(&mr->usecnt)) + return -EBUSY; + + pd = mr->pd; + ret = mr->device->dereg_mr(mr); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_dereg_mr); + +/* Memory windows */ + +struct ib_mw *ib_alloc_mw(struct ib_pd *pd) +{ + struct ib_mw *mw; + + if (!pd->device->alloc_mw) + return ERR_PTR(-ENOSYS); + + mw = pd->device->alloc_mw(pd); + if (!IS_ERR(mw)) { + mw->device = pd->device; + mw->pd = pd; + mw->uobject = NULL; + atomic_inc(&pd->usecnt); + } + + return mw; +} +EXPORT_SYMBOL(ib_alloc_mw); + +int ib_dealloc_mw(struct ib_mw *mw) +{ + struct ib_pd *pd; + int ret; + + pd = mw->pd; + ret = mw->device->dealloc_mw(mw); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_dealloc_mw); + +/* "Fast" memory regions */ + +struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, + int mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct ib_fmr *fmr; + + if (!pd->device->alloc_fmr) + return ERR_PTR(-ENOSYS); + + fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); + if (!IS_ERR(fmr)) { + fmr->device = pd->device; + fmr->pd = pd; + atomic_inc(&pd->usecnt); + } + + return fmr; +} +EXPORT_SYMBOL(ib_alloc_fmr); + +int ib_unmap_fmr(struct list_head *fmr_list) +{ + struct ib_fmr *fmr; + + if (list_empty(fmr_list)) + return 0; + + fmr = list_entry(fmr_list->next, struct ib_fmr, list); + return fmr->device->unmap_fmr(fmr_list); +} +EXPORT_SYMBOL(ib_unmap_fmr); + +int ib_dealloc_fmr(struct ib_fmr *fmr) +{ + struct ib_pd *pd; + int ret; + + pd = fmr->pd; + ret = fmr->device->dealloc_fmr(fmr); + if (!ret) + atomic_dec(&pd->usecnt); + + return ret; +} +EXPORT_SYMBOL(ib_dealloc_fmr); + +/* Multicast groups */ + +int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) +{ + return qp->device->attach_mcast ? + qp->device->attach_mcast(qp, gid, lid) : + -ENOSYS; +} +EXPORT_SYMBOL(ib_attach_mcast); + +int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) +{ + return qp->device->detach_mcast ? + qp->device->detach_mcast(qp, gid, lid) : + -ENOSYS; +} +EXPORT_SYMBOL(ib_detach_mcast); diff --git a/branches/MTHCA/hw/mthca/kernel/mthca.h b/branches/MTHCA/hw/mthca/kernel/mthca.h new file mode 100644 index 00000000..456d47ba --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca.h @@ -0,0 +1,12 @@ +#ifndef MTHCA_H +#define MTHCA_H + +#include "hca_driver.h" +#include "mthca_dev.h" + +NTSTATUS mthca_init_one(hca_dev_ext_t *ext); +void mthca_remove_one(hca_dev_ext_t *ext); +int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id); + +#endif + diff --git a/branches/MTHCA/hw/mthca/kernel/mthca.inf b/branches/MTHCA/hw/mthca/kernel/mthca.inf new file mode 100644 index 00000000..2fb94d5e --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca.inf @@ -0,0 +1,194 @@ +; Mellanox Technologies InfiniBand HCAs. +; Copyright 2005 Mellanox Technologies all Rights Reserved. 
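+;
+; Note: the %...% tokens used throughout the sections below (service
+; constants, registry value types and the DIRID_* destination directory
+; IDs) are all defined in the [Strings] section at the end of this file.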
+ +[Version] +Signature="$Windows NT$" +Class=InfiniBandHca +ClassGUID=%HcaClassGuid% +Provider=%MTL% +CatalogFile=mthca.cat +DriverVer=09/10/2005,1.0.3 + +; ================= Destination directory section ===================== + +[DestinationDirs] +DefaultDestDir=%DIRID_DRIVERS% +ClassCopyFiles=%DIRID_SYSTEM% +MTHCA.UMCopyFiles=%DIRID_SYSTEM% +MTHCA.WOW64CopyFiles=%DIRID_SYSTEM_X86% + +; ================= Class Install section ===================== + +[ClassInstall32] +CopyFiles=ClassCopyFiles +AddReg=ClassAddReg + +[ClassCopyFiles] +IbInstaller.dll + +[ClassAddReg] +HKR,,,,"InfiniBand Host Channel Adapters" +HKR,,Icon,,-5 +HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \ + %HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller" + +; ================= Device Install section ===================== + +[SourceDisksNames.x86] +1=%DiskId%,,,\x86 + +[SourceDisksNames.amd64] +1=%DiskId%,,,\amd64 + +[SourceDisksNames.ia64] +1=%DiskId%,,,\ia64 + +[SourceDisksFiles] +IbInstaller.dll=1 +ibal.sys=1 +mthca.sys=1 +; 2 lines excluded temporary +;mthcau.dll=1 +;mthcaud.dll=1 + +[SourceDisksFiles.amd64] +IbInstaller.dll=1 +ibal.sys=1 +mthca.sys=1 +; 2 lines excluded temporary +;mthcau.dll=1 +;mthcaud.dll=1 +;uvpd32.dll=1 +;uvpd32d.dll=1 + +[SourceDisksFiles.ia64] +IbInstaller.dll=1 +ibal.sys=1 +mthca.sys=1 +; 2 lines excluded temporary +;mthcau.dll=1 +;mthcaud.dll=1 +;uvpd32.dll=1 +;uvpd32d.dll=1 + +[Manufacturer] +%MTL% = HCA.DeviceSection,ntx86,ntamd64,ntia64 + +[HCA.DeviceSection] +; empty since we don't support W9x/Me + +[HCA.DeviceSection.ntx86] +%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 +%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 + + +[HCA.DeviceSection.ntamd64] +%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 +%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 + +[HCA.DeviceSection.ntia64] +%MT23108.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 +%MT25208.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25218.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT25204.DeviceDesc% = MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 + +[MTHCA.DDInstall.ntx86] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles + +[MTHCA.DDInstall.ntamd64] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles +CopyFiles = MTHCA.WOW64CopyFiles + +[MTHCA.DDInstall.ntia64] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles +CopyFiles = MTHCA.WOW64CopyFiles + +[MTHCA.DDInstall.ntx86.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall +AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall + +[MTHCA.DDInstall.ntamd64.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall +AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall + +[MTHCA.DDInstall.ntia64.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall +AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall + +[MTHCA.CopyFiles] +ibal.sys +mthca.sys + +[MTHCA.UMCopyFiles] +; 2 lines excluded temporary +;mthcau.dll,,,2 +;mthcaud.dll,,,2 + 
+[MTHCA.WOW64CopyFiles] +; 2 lines excluded temporary +;mthcau.dll,uvpd32.dll,,2 +;mthcaud.dll,uvpd32d.dll,,2 + +; +; ============= Service Install section ============== +; + +[MTHCA.ServiceInstall] +DisplayName = %MTHCA.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\mthca.sys +LoadOrderGroup = extended base +AddReg = MTHCA.ParamsReg + +[Ibal.ServiceInstall] +DisplayName = %Ibal.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ibal.sys +AddReg = Ibal.ParamsReg + +[MTHCA.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x80000000 + +[Ibal.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD_NO_CLOBBER%,0x80000000 +HKR,"Parameters","SmiPollInterval",%REG_DWORD_NO_CLOBBER%,20000 +HKR,"Parameters","IocQueryTimeout",%REG_DWORD_NO_CLOBBER%,250 +HKR,"Parameters","IocQueryRetries",%REG_DWORD_NO_CLOBBER%,4 +HKR,"Parameters","IocPollInterval",%REG_DWORD_NO_CLOBBER%,30000 + +[Strings] +HcaClassGuid = "{58517E00-D3CF-40c9-A679-CEE5752F4491}" +MTL="Mellanox Technologies Ltd." +Ibal.ServiceDesc = "Mellanox InfiniBand Access Layer" +MTHCA.ServiceDesc = "Driver for Mellanox InfiniHost Devices" +MT23108.DeviceDesc="InfiniHost (MT23108) - Mellanox InfiniBand HCA" +MT25208.DeviceDesc="InfiniHost (MT25208) - Mellanox InfiniBand HCA for PCI Express" +MT25218.DeviceDesc="InfiniHost III Ex (MT25218) - Mellanox InfiniBand HCA for PCI Express" +MT24204.DeviceDesc="InfiniHost III Lx (MT24204) - Mellanox InfiniBand HCA for PCI Express" +MT25204.DeviceDesc="InfiniHost III Lx (MT25204) - Mellanox InfiniBand HCA for PCI Express" +DiskId = "Mellanox InfiniBand HCA installation disk" +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_DWORD_NO_CLOBBER = 0x00010003 +REG_MULTI_SZ_APPEND = 0x00010008 +DIRID_SYSTEM = 11 +DIRID_DRIVERS = 12 +DIRID_SYSTEM_X86 = 16425 diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_allocator.c b/branches/MTHCA/hw/mthca/kernel/mthca_allocator.c new file mode 100644 index 00000000..d03ed81d --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_allocator.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_allocator.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include "mthca_dev.h" + +/* Trivial bitmap-based allocator */ +u32 mthca_alloc(struct mthca_alloc *alloc) +{ + u32 obj; + + spin_lock(&alloc->lock); + obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); + if (obj >= alloc->max) { + alloc->top = (alloc->top + alloc->max) & alloc->mask; + obj = find_first_zero_bit(alloc->table, alloc->max); + } + + if (obj < alloc->max) { + set_bit(obj, alloc->table); + obj |= alloc->top; + } else + obj = -1; + + spin_unlock(&alloc->lock); + + return obj; +} + +void mthca_free(struct mthca_alloc *alloc, u32 obj) +{ + obj &= alloc->max - 1; + spin_lock(&alloc->lock); + clear_bit(obj, alloc->table); + alloc->last = MIN(alloc->last, obj); + alloc->top = (alloc->top + alloc->max) & alloc->mask; + spin_unlock(&alloc->lock); +} + +int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, + u32 reserved) +{ + int i; + + /* num must be a power of 2 */ + if (num != 1 << (ffs(num) - 1)) + return -EINVAL; + + alloc->last = 0; + alloc->top = 0; + alloc->max = num; + alloc->mask = mask; + spin_lock_init(&alloc->lock); + alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long), + GFP_KERNEL); + if (!alloc->table) + return -ENOMEM; + + bitmap_zero(alloc->table, num); + for (i = 0; i < (int)reserved; ++i) + set_bit(i, alloc->table); + + return 0; +} + +void mthca_alloc_cleanup(struct mthca_alloc *alloc) +{ + kfree(alloc->table); +} + +/* + * Array of pointers with lazy allocation of leaf pages. Callers of + * _get, _set and _clear methods must use a lock or otherwise + * serialize access to the array. + */ + +void *mthca_array_get(struct mthca_array *array, int index) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + if (array->page_list[p].page) { + int i = index & (PAGE_SIZE / sizeof (void *) - 1); + return array->page_list[p].page[i]; + } else + return NULL; +} + +int mthca_array_set(struct mthca_array *array, int index, void *value) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + /* Allocate with GFP_ATOMIC because we'll be called with locks held. 
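+	 *
+	 * A typical serialized caller looks roughly like this (sketch;
+	 * tbl, arr, index and ptr are illustrative names, not defined
+	 * here):
+	 *
+	 *	spin_lock(&tbl->lock);
+	 *	err = mthca_array_set(&tbl->arr, index, ptr);
+	 *	spin_unlock(&tbl->lock);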
*/ + if (!array->page_list[p].page) + array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); + + if (!array->page_list[p].page) + return -ENOMEM; + + array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] = + value; + ++array->page_list[p].used; + + return 0; +} + +void mthca_array_clear(struct mthca_array *array, int index) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + if (--array->page_list[p].used == 0) { + free_page((void*) array->page_list[p].page); + array->page_list[p].page = NULL; + } + + if (array->page_list[p].used < 0) + pr_debug("Array %p index %d page %d with ref count %d < 0\n", + array, index, p, array->page_list[p].used); +} + +int mthca_array_init(struct mthca_array *array, int nent) +{ + int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; + int i; + + array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL); + if (!array->page_list) + return -ENOMEM; + + for (i = 0; i < npage; ++i) { + array->page_list[i].page = NULL; + array->page_list[i].used = 0; + } + + return 0; +} + +void mthca_array_cleanup(struct mthca_array *array, int nent) +{ + int i; + + for (i = 0; i < (int)((nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE); ++i) + free_page((void*) array->page_list[i].page); + + kfree(array->page_list); +} + +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr) +{ + int err = -ENOMEM; + int npages, shift; + u64 *dma_list = NULL; + dma_addr_t t; + int i; + + if (size <= max_direct) { + *is_direct = 1; + npages = 1; + shift = get_order(size) + PAGE_SHIFT; + + buf->direct.buf = dma_alloc_coherent(dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + buf->direct.mapping = t; /* save dma_addr_t */ + + RtlZeroMemory(buf->direct.buf, size); + + while (t & ((1 << shift) - 1)) { + --shift; + npages *= 2; + } + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_free; + + for (i = 0; i < npages; ++i) + dma_list[i] = t + i * (1 << shift); + } else { + *is_direct = 0; + npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + shift = PAGE_SHIFT; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + return -ENOMEM; + + buf->page_list = kmalloc(npages * sizeof *buf->page_list, + GFP_KERNEL); + if (!buf->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + buf->page_list[i].buf = NULL; + + for (i = 0; i < npages; ++i) { + buf->page_list[i].buf = + dma_alloc_coherent(dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + dma_list[i] = t; + buf->page_list[i].mapping = t; + + RtlZeroMemory(buf->page_list[i].buf, PAGE_SIZE); + } + } + + err = mthca_mr_alloc_phys(dev, pd->pd_num, + dma_list, shift, npages, + 0, size, + MTHCA_MPT_FLAG_LOCAL_READ | + (hca_write ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0), + mr); + if (err) + goto err_free; + + kfree(dma_list); + + return 0; + +err_free: + mthca_buf_free(dev, size, buf, *is_direct, NULL); + +err_out: + kfree(dma_list); + + return err; +} + +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr) +{ + int i; + + if (mr) + mthca_free_mr(dev, mr); + + if (is_direct) { + dma_free_coherent(dev, size, buf->direct.buf, + buf->direct.mapping); + } + else { + for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) { + dma_free_coherent(dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].mapping); + } + kfree(buf->page_list); + } +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_av.c b/branches/MTHCA/hw/mthca/kernel/mthca_av.c new file mode 100644 index 00000000..48cc80b6 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_av.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id: mthca_av.c 2928 2005-07-28 18:45:56Z sean.hefty $
+ */
+
+#include
+#include
+
+#include "mthca_dev.h"
+
+struct mthca_av {
+	__be32 port_pd;
+	u8     reserved1;
+	u8     g_slid;
+	__be16 dlid;
+	u8     reserved2;
+	u8     gid_index;
+	u8     msg_sr;
+	u8     hop_limit;
+	__be32 sl_tclass_flowlabel;
+	__be32 dgid[4];
+};
+
+int mthca_create_ah(struct mthca_dev *dev,
+		    struct mthca_pd *pd,
+		    struct ib_ah_attr *ah_attr,
+		    struct mthca_ah *ah)
+{
+	u32 index = -1;
+	struct mthca_av *av = NULL;
+
+	ah->type = MTHCA_AH_PCI_POOL;
+
+	if (mthca_is_memfree(dev)) {
+		ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
+		if (!ah->av)
+			return -ENOMEM;
+
+		ah->type = MTHCA_AH_KMALLOC;
+		av = ah->av;
+	} else if (!atomic_read(&pd->sqp_count) &&
+		   !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
+		index = mthca_alloc(&dev->av_table.alloc);
+
+		/* fall back to allocating in host memory */
+		if (index == -1)
+			goto on_hca_fail;
+
+		av = kmalloc(sizeof *av, GFP_ATOMIC);
+		if (!av)
+			goto on_hca_fail;
+
+		ah->type = MTHCA_AH_ON_HCA;
+		ah->avdma = dev->av_table.ddr_av_base +
+			index * MTHCA_AV_SIZE;
+	}
+
+on_hca_fail:
+	if (ah->type == MTHCA_AH_PCI_POOL) {
+		ah->av = pci_pool_alloc(dev->av_table.pool,
+					SLAB_ATOMIC, &ah->avdma);
+		if (!ah->av)
+			return -ENOMEM;
+
+		av = ah->av;
+	}
+
+	ah->key = pd->ntmr.ibmr.lkey;
+
+	RtlZeroMemory(av, MTHCA_AV_SIZE);
+
+	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
+	av->g_slid  = ah_attr->src_path_bits;
+	av->dlid    = cpu_to_be16(ah_attr->dlid);
+	av->msg_sr  = (3 << 4) | /* 2K message */
+		ah_attr->static_rate;
+	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+	if (ah_attr->ah_flags & IB_AH_GRH) {
+		av->g_slid |= 0x80;
+		av->gid_index = (ah_attr->port_num - 1) * dev->limits.gid_table_len +
+			ah_attr->grh.sgid_index;
+		av->hop_limit = ah_attr->grh.hop_limit;
+		av->sl_tclass_flowlabel |=
+			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+				    ah_attr->grh.flow_label);
+		memcpy(av->dgid, ah_attr->grh.dgid.raw, 16);
+	} else {
+		/* Arbel workaround -- low byte of GID must be 2 */
+		av->dgid[3] = cpu_to_be32(2);
+	}
+
+	if (0) {
+		int j;
+
+		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
+			  av, (unsigned long) ah->avdma);
+		for (j = 0; j < 8; ++j)
+			printk(KERN_DEBUG "  [%2x] %08x\n",
+			       j * 4, be32_to_cpu(((__be32 *) av)[j]));
+	}
+
+	if (ah->type == MTHCA_AH_ON_HCA) {
+		memcpy_toio((u8*)dev->av_table.av_map + index * MTHCA_AV_SIZE,
+			    av, MTHCA_AV_SIZE);
+		kfree(av);
+	}
+
+	return 0;
+}
+
+int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
+{
+	switch (ah->type) {
+	case MTHCA_AH_ON_HCA:
+		mthca_free(&dev->av_table.alloc,
+			   (u32)((ah->avdma - dev->av_table.ddr_av_base) / MTHCA_AV_SIZE));
+		break;
+
+	case MTHCA_AH_PCI_POOL:
+		pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
+		break;
+
+	case MTHCA_AH_KMALLOC:
+		kfree(ah->av);
+		break;
+	}
+
+	return 0;
+}
+
+int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
+		  struct ib_ud_header *header)
+{
+	if (ah->type == MTHCA_AH_ON_HCA)
+		return -EINVAL;
+
+	/* note: the casts must be applied after the shifts, or the
+	 * high-order fields are truncated away */
+	header->lrh.service_level   = (u8)(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
+	header->lrh.destination_lid = ah->av->dlid;
+	header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
+	if (ah->av->g_slid & 0x80) {
+		header->grh_present = 1;
+		header->grh.traffic_class =
+			(u8)((be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff);
+		header->grh.flow_label =
+			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
+		ib_get_cached_gid(&dev->ib_dev,
+				  (u8)(be32_to_cpu(ah->av->port_pd) >> 24),
+				  ah->av->gid_index,
+
&header->grh.source_gid); + memcpy(header->grh.destination_gid.raw, + ah->av->dgid, 16); + } else { + header->grh_present = 0; + } + + return 0; +} + +int __devinit mthca_init_av_table(struct mthca_dev *dev) +{ + int err; + + if (mthca_is_memfree(dev)) + return 0; + + err = mthca_alloc_init(&dev->av_table.alloc, + dev->av_table.num_ddr_avs, + dev->av_table.num_ddr_avs - 1, + 0); + if (err) + return err; + + dev->av_table.pool = pci_pool_create("mthca_av", dev, + MTHCA_AV_SIZE, + MTHCA_AV_SIZE, 0); + if (!dev->av_table.pool) + goto out_free_alloc; + + if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + dev->av_table.av_map = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_DDR) + + dev->av_table.ddr_av_base - + dev->ddr_start, + dev->av_table.num_ddr_avs * + MTHCA_AV_SIZE, + &dev->av_table.av_map_size); + if (!dev->av_table.av_map) + goto out_free_pool; + } else + dev->av_table.av_map = NULL; + + return 0; + + out_free_pool: + pci_pool_destroy(dev->av_table.pool); + + out_free_alloc: + mthca_alloc_cleanup(&dev->av_table.alloc); + return -ENOMEM; +} + +void __devexit mthca_cleanup_av_table(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) + return; + + if (dev->av_table.av_map) + iounmap(dev->av_table.av_map, dev->av_table.av_map_size); + pci_pool_destroy(dev->av_table.pool); + mthca_alloc_cleanup(&dev->av_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_cmd.c b/branches/MTHCA/hw/mthca/kernel/mthca_cmd.c new file mode 100644 index 00000000..ab639d80 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_cmd.c @@ -0,0 +1,1826 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_cmd.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include + +#include "mthca_dev.h" +#include "mthca_config_reg.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +#define CMD_POLL_TOKEN 0xffff + +enum { + HCR_IN_PARAM_OFFSET = 0x00, + HCR_IN_MODIFIER_OFFSET = 0x08, + HCR_OUT_PARAM_OFFSET = 0x0c, + HCR_TOKEN_OFFSET = 0x14, + HCR_STATUS_OFFSET = 0x18, + + HCR_OPMOD_SHIFT = 12, + HCA_E_BIT = 22, + HCR_GO_BIT = 23 +}; + +enum { + /* initialization and general commands */ + CMD_SYS_EN = 0x1, + CMD_SYS_DIS = 0x2, + CMD_MAP_FA = 0xfff, + CMD_UNMAP_FA = 0xffe, + CMD_RUN_FW = 0xff6, + CMD_MOD_STAT_CFG = 0x34, + CMD_QUERY_DEV_LIM = 0x3, + CMD_QUERY_FW = 0x4, + CMD_ENABLE_LAM = 0xff8, + CMD_DISABLE_LAM = 0xff7, + CMD_QUERY_DDR = 0x5, + CMD_QUERY_ADAPTER = 0x6, + CMD_INIT_HCA = 0x7, + CMD_CLOSE_HCA = 0x8, + CMD_INIT_IB = 0x9, + CMD_CLOSE_IB = 0xa, + CMD_QUERY_HCA = 0xb, + CMD_SET_IB = 0xc, + CMD_ACCESS_DDR = 0x2e, + CMD_MAP_ICM = 0xffa, + CMD_UNMAP_ICM = 0xff9, + CMD_MAP_ICM_AUX = 0xffc, + CMD_UNMAP_ICM_AUX = 0xffb, + CMD_SET_ICM_SIZE = 0xffd, + + /* TPT commands */ + CMD_SW2HW_MPT = 0xd, + CMD_QUERY_MPT = 0xe, + CMD_HW2SW_MPT = 0xf, + CMD_READ_MTT = 0x10, + CMD_WRITE_MTT = 0x11, + CMD_SYNC_TPT = 0x2f, + + /* EQ commands */ + CMD_MAP_EQ = 0x12, + CMD_SW2HW_EQ = 0x13, + CMD_HW2SW_EQ = 0x14, + CMD_QUERY_EQ = 0x15, + + /* CQ commands */ + CMD_SW2HW_CQ = 0x16, + CMD_HW2SW_CQ = 0x17, + CMD_QUERY_CQ = 0x18, + CMD_RESIZE_CQ = 0x2c, + + /* SRQ commands */ + CMD_SW2HW_SRQ = 0x35, + CMD_HW2SW_SRQ = 0x36, + CMD_QUERY_SRQ = 0x37, + CMD_ARM_SRQ = 0x40, + + /* QP/EE commands */ + CMD_RST2INIT_QPEE = 0x19, + CMD_INIT2RTR_QPEE = 0x1a, + CMD_RTR2RTS_QPEE = 0x1b, + CMD_RTS2RTS_QPEE = 0x1c, + CMD_SQERR2RTS_QPEE = 0x1d, + CMD_2ERR_QPEE = 0x1e, + CMD_RTS2SQD_QPEE = 0x1f, + CMD_SQD2SQD_QPEE = 0x38, + CMD_SQD2RTS_QPEE = 0x20, + CMD_ERR2RST_QPEE = 0x21, + CMD_QUERY_QPEE = 0x22, + CMD_INIT2INIT_QPEE = 0x2d, + CMD_SUSPEND_QPEE = 0x32, + CMD_UNSUSPEND_QPEE = 0x33, + /* special QPs and management commands */ + CMD_CONF_SPECIAL_QP = 0x23, + CMD_MAD_IFC = 0x24, + + /* multicast commands */ + CMD_READ_MGM = 0x25, + CMD_WRITE_MGM = 0x26, + CMD_MGID_HASH = 0x27, + + /* miscellaneous commands */ + CMD_DIAG_RPRT = 0x30, + CMD_NOP = 0x31, + + /* debug commands */ + CMD_QUERY_DEBUG_MSG = 0x2a, + CMD_SET_DEBUG_MSG = 0x2b, +}; + +/* + * According to Mellanox code, FW may be starved and never complete + * commands. So we can't use strict timeouts described in PRM -- we + * just arbitrarily select 60 seconds for now. 
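+ *
+ * Note that HZ is redefined below as 1000000: every timeout in this
+ * file is expressed in microseconds, so CMD_TIME_CLASS_A/B/C all come
+ * out to 60 seconds.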
+ */
+#define HZ 1000000 /* 1 sec in usecs */
+#define CMD_POLL_N_TRIES 60
+
+enum {
+	CMD_TIME_CLASS_A = 60 * HZ,
+	CMD_TIME_CLASS_B = 60 * HZ,
+	CMD_TIME_CLASS_C = 60 * HZ
+};
+
+enum {
+	GO_BIT_TIMEOUT = 10 * HZ
+};
+
+#define GO_BIT_N_TRIES 5
+#define GO_BIT_STALL_TIMEOUT ((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES) /* usecs */
+
+struct mthca_cmd_context {
+#ifdef LINUX_TO_BE_CHANGED
+	struct completion done;
+	struct timer_list timer;
+#else
+	KEVENT event;
+#endif
+	int result;
+	int next;
+	u64 out_param;
+	u16 token;
+	u8  status;
+};
+
+static inline int go_bit(struct mthca_dev *dev)
+{
+	return readl(dev->hcr + HCR_STATUS_OFFSET) &
+		swab32(1 << HCR_GO_BIT);
+}
+
+/*
+* Function: performs a busy-wait loop, while polling the GO bit
+* Return: 0 when the GO bit was extinguished in time
+*/
+static int poll_go_bit(struct mthca_dev *dev)
+{
+	int i = 0; /* init must be here! */
+
+	if (!go_bit(dev))
+		return 0;
+
+	for (; i < CMD_POLL_N_TRIES; ++i) {
+		if (!go_bit(dev))
+			return 0;
+	}
+
+	return 1;
+}
+
+/*
+* Function: polls the GO bit, stalling between reads, for up to
+* 'timeout' usecs
+* Return: 0 when the GO bit was extinguished in time
+*/
+static int wait_go_bit(struct mthca_dev *dev, unsigned long timeout)
+{
+	unsigned long waited;
+
+	if (!poll_go_bit(dev))
+		return 0;
+
+	for (waited = 0; waited < timeout; waited += GO_BIT_STALL_TIMEOUT) {
+		KeStallExecutionProcessor(GO_BIT_STALL_TIMEOUT);
+		if (!go_bit(dev))
+			return 0;
+	}
+
+	return 1;
+}
+
+static int mthca_cmd_post(struct mthca_dev *dev,
+			  u64 in_param,
+			  u64 out_param,
+			  u32 in_modifier,
+			  u8 op_modifier,
+			  u16 op,
+			  u16 token,
+			  int event)
+{
+	int err = 0;
+
+	if (down_interruptible(&dev->cmd.hcr_mutex))
+		return -EINTR;
+
+	if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	/*
+	 * We use writel (instead of something like memcpy_toio)
+	 * because writes of less than 32 bits to the HCR don't work
+	 * (and some architectures such as ia64 implement memcpy_toio
+	 * in terms of writeb).
+	 */
+	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),           (u8 *)dev->hcr + 0 * 4);
+	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  (u8 *)dev->hcr + 1 * 4);
+	__raw_writel((__force u32) cpu_to_be32(in_modifier),              (u8 *)dev->hcr + 2 * 4);
+	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),          (u8 *)dev->hcr + 3 * 4);
+	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), (u8 *)dev->hcr + 4 * 4);
+	__raw_writel((__force u32) cpu_to_be32(token << 16),              (u8 *)dev->hcr + 5 * 4);
+
+	/* __raw_writel may not order writes. */
+	wmb();
+
+	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
+					       (event ? (1 << HCA_E_BIT) : 0)   |
+					       (op_modifier << HCR_OPMOD_SHIFT) |
+					       op),                       (u8 *)dev->hcr + 6 * 4);
+
+out:
+	up(&dev->cmd.hcr_mutex);
+	return err;
+}
+
+
+static int mthca_cmd_poll(struct mthca_dev *dev,
+			  u64 in_param,
+			  u64 *out_param,
+			  int out_is_imm,
+			  u32 in_modifier,
+			  u8 op_modifier,
+			  u16 op,
+			  unsigned long timeout,
+			  u8 *status)
+{
+	int err = 0;
+
+	if (sem_down_interruptible(&dev->cmd.poll_sem))
+		return -EINTR;
+
+	err = mthca_cmd_post(dev, in_param,
+			     out_param ?
*out_param : 0, + in_modifier, op_modifier, + op, CMD_POLL_TOKEN, 0); + if (err) + goto out; + + if (wait_go_bit(dev,timeout)) { + err = -EBUSY; + goto out; + } + + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); + + *status = (u8)(be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24); + +out: + sem_up(&dev->cmd.poll_sem); + return err; +} + +void mthca_cmd_event(struct mthca_dev *dev, + u16 token, + u8 status, + u64 out_param) +{ + struct mthca_cmd_context *context = + &dev->cmd.context[token & dev->cmd.token_mask]; + + /* previously timed out command completing at long last */ + if (token != context->token) + return; + + context->result = 0; + context->status = status; + context->out_param = out_param; + + context->token += dev->cmd.token_mask + 1; + +#ifdef LINUX_TO_BE_CHANGED + complete(&context->done); +#else + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeSetEvent( &context->event, 0, FALSE ); +#endif +} + +#ifdef LINUX_TO_BE_REMOVED +static void event_timeout(unsigned long context_ptr) +{ + struct mthca_cmd_context *context = + (struct mthca_cmd_context *) context_ptr; + + context->result = -EBUSY; + complete(&context->done); +} +#endif + +static int mthca_cmd_wait(struct mthca_dev *dev, + u64 in_param, + u64 *out_param, + int out_is_imm, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + int err = 0; + struct mthca_cmd_context *context; + + if (sem_down_interruptible(&dev->cmd.event_sem)) + return -EINTR; + + spin_lock(&dev->cmd.context_lock); + BUG_ON(dev->cmd.free_head < 0); + context = &dev->cmd.context[dev->cmd.free_head]; + dev->cmd.free_head = context->next; + spin_unlock(&dev->cmd.context_lock); + +#ifdef LINUX_TO_BE_CHANGED + init_completion(&context->done); +#else + KeClearEvent( &context->event ); +#endif + + err = mthca_cmd_post(dev, in_param, + out_param ? *out_param : 0, + in_modifier, op_modifier, + op, context->token, 1); + if (err) + goto out; + +#ifdef LINUX_TO_BE_CHANGED + context->timer.expires = jiffies + timeout; + add_timer(&context->timer); + wait_for_completion(&context->done); + del_timer_sync(&context->timer); + err = context->result; + if (err) + goto out; +#else + { + //TODO: Questions: + // Can it once be on behalf of user request, which would require UserRequest and UserMode + // Can it be alertable ? 
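+		// (For reference: KeWaitForSingleObject takes a LARGE_INTEGER
+		// timeout in 100 ns units, and a negative value means a
+		// relative wait -- hence the (-10) * timeout conversion from
+		// microseconds below.)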
+ NTSTATUS res; + LARGE_INTEGER interval; + interval.QuadPart = (-10)* (__int64)timeout; + res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE, &interval ); + if (res != STATUS_SUCCESS) { + err = -EBUSY; + goto out; + } + } +#endif + + + *status = context->status; + if (*status) + mthca_dbg(dev, "Command %02x completed with status %02x\n", + op, *status); + + if (out_is_imm) + *out_param = context->out_param; + +out: + spin_lock(&dev->cmd.context_lock); + context->next = dev->cmd.free_head; + dev->cmd.free_head = (int)(context - dev->cmd.context); + spin_unlock(&dev->cmd.context_lock); + + sem_up( &dev->cmd.event_sem ); + + return err; +} + +/* Invoke a command with an output mailbox */ +static int mthca_cmd_box(struct mthca_dev *dev, + u64 in_param, + u64 out_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + if (dev->cmd.use_events) + return mthca_cmd_wait(dev, in_param, &out_param, 0, + in_modifier, op_modifier, op, + timeout, status); + else + return mthca_cmd_poll(dev, in_param, &out_param, 0, + in_modifier, op_modifier, op, + timeout, status); +} + +/* Invoke a command with no output parameter */ +static int mthca_cmd(struct mthca_dev *dev, + u64 in_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + return mthca_cmd_box(dev, in_param, 0, in_modifier, + op_modifier, op, timeout, status); +} + +/* + * Invoke a command with an immediate output parameter (and copy the + * output into the caller's out_param pointer after the command + * executes). + */ +static int mthca_cmd_imm(struct mthca_dev *dev, + u64 in_param, + u64 *out_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + if (dev->cmd.use_events) + return mthca_cmd_wait(dev, in_param, out_param, 1, + in_modifier, op_modifier, op, + timeout, status); + else + return mthca_cmd_poll(dev, in_param, out_param, 1, + in_modifier, op_modifier, op, + timeout, status); +} + +int mthca_cmd_init(struct mthca_dev *dev) +{ + KeInitializeMutex(&dev->cmd.hcr_mutex, 0); + sem_init(&dev->cmd.poll_sem, 1, 1); + dev->cmd.use_events = 0; + + dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE, + MTHCA_HCR_SIZE, &dev->hcr_size); + if (!dev->hcr) { + mthca_err(dev, "Couldn't map command register."); + return -ENOMEM; + } + + dev->cmd.pool = pci_pool_create("mthca_cmd", dev, + MTHCA_MAILBOX_SIZE, + MTHCA_MAILBOX_SIZE, 0); + if (!dev->cmd.pool) { + iounmap(dev->hcr, dev->hcr_size); + return -ENOMEM; + } + + return 0; +} + +void mthca_cmd_cleanup(struct mthca_dev *dev) +{ + pci_pool_destroy(dev->cmd.pool); + iounmap(dev->hcr, dev->hcr_size); +} + +/* + * Switch to using events to issue FW commands (should be called after + * event queue to command events has been initialized). 
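+ *
+ * The loop below rounds cmd.max_cmds up to the next power of two and
+ * subtracts one to form cmd.token_mask, so that mthca_cmd_event() can
+ * recover the context slot directly from a completion token with
+ * (token & token_mask).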
+ */ +int mthca_cmd_use_events(struct mthca_dev *dev) +{ + int i; + + dev->cmd.context = kmalloc(dev->cmd.max_cmds * + sizeof (struct mthca_cmd_context), + GFP_KERNEL); + if (!dev->cmd.context) + return -ENOMEM; + + for (i = 0; i < dev->cmd.max_cmds; ++i) { + dev->cmd.context[i].token = (u16)i; + dev->cmd.context[i].next = i + 1; +#ifdef LINUX_TO_BE_REMOVED + init_timer(&dev->cmd.context[i].timer); + dev->cmd.context[i].timer.data = + (unsigned long) &dev->cmd.context[i]; + dev->cmd.context[i].timer.function = event_timeout; +#else + KeInitializeEvent( &dev->cmd.context[i].event, NotificationEvent , FALSE ); +#endif + } + + dev->cmd.context[dev->cmd.max_cmds - 1].next = -1; + dev->cmd.free_head = 0; + + sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX); + spin_lock_init(&dev->cmd.context_lock); + + for (dev->cmd.token_mask = 1; + dev->cmd.token_mask < dev->cmd.max_cmds; + dev->cmd.token_mask <<= 1) + ; /* nothing */ + --dev->cmd.token_mask; + + dev->cmd.use_events = 1; + sem_down(&dev->cmd.poll_sem); + + return 0; +} + +/* + * Switch back to polling (used when shutting down the device) + */ +void mthca_cmd_use_polling(struct mthca_dev *dev) +{ + int i; + + dev->cmd.use_events = 0; + + for (i = 0; i < dev->cmd.max_cmds; ++i) + sem_down(&dev->cmd.event_sem); + + kfree(dev->cmd.context); + + sem_up(&dev->cmd.poll_sem); +} + +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask) +{ + struct mthca_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, gfp_mask); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + return mailbox; +} + +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) +{ + u64 out; + int ret; + + ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status); + + if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR) + mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, " + "sladdr=%d, SPD source=%s\n", + (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, + (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"); + + return ret; +} + +int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status); +} + +static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, + u64 virt, u8 *status) +{ + struct mthca_mailbox *mailbox; + struct mthca_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + unsigned long i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE); + pages = mailbox->buf; + + for (mthca_icm_first(icm, &iter); + !mthca_icm_last(&iter); + mthca_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. 
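+		 *
+		 * Worked example (illustrative numbers): a chunk at address
+		 * 0x103000 with size 0x5000 gives addr | size = 0x107000,
+		 * whose least significant set bit is 0x1000, so lg = 12 and
+		 * the chunk is passed to the firmware as five 4 KB pages.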
+		 */
+		i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter);
+		lg = ffs(i) - 1;
+		if (lg < 12) {
+			mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n",
+				   (unsigned long long) mthca_icm_addr(&iter),
+				   mthca_icm_size(&iter));
+			err = -EINVAL;
+			goto out;
+		}
+		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
+			if (virt != -1) {
+				pages[nent * 2] = cpu_to_be64(virt);
+				virt += 1 << lg;
+			}
+
+			pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
+							   (i << lg)) | (lg - 12));
+			ts += 1 << (lg - 10);
+			++tc;
+
+			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
+				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
+						CMD_TIME_CLASS_B, status);
+				if (err || *status)
+					goto out;
+				nent = 0;
+			}
+		}
+	}
+
+	if (nent)
+		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
+				CMD_TIME_CLASS_B, status);
+
+	switch (op) {
+	case CMD_MAP_FA:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+		break;
+	case CMD_MAP_ICM_AUX:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+		break;
+	case CMD_MAP_ICM:
+		mthca_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+			  tc, ts, (unsigned long long) virt - (ts << 10));
+		break;
+	}
+
+out:
+	mthca_free_mailbox(dev, mailbox);
+	return err;
+}
+
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status);
+}
+
+int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
+}
+
+int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
+{
+	struct mthca_mailbox *mailbox;
+	u32 *outbox;
+	int err = 0;
+	u8 lg;
+
+#define QUERY_FW_OUT_SIZE              0x100
+#define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_MAX_CMD_OFFSET        0x0f
+#define QUERY_FW_ERR_START_OFFSET      0x30
+#define QUERY_FW_ERR_SIZE_OFFSET       0x38
+
+#define QUERY_FW_START_OFFSET          0x20
+#define QUERY_FW_END_OFFSET            0x28
+
+#define QUERY_FW_SIZE_OFFSET           0x00
+#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
+#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
+#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	outbox = mailbox->buf;
+
+	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
+			    CMD_TIME_CLASS_A, status);
+
+	if (err)
+		goto out;
+
+	MTHCA_GET(dev->fw_ver, outbox, QUERY_FW_VER_OFFSET);
+	/*
+	 * FW subminor version is at more significant bits than minor
+	 * version, so swap here.
+	 */
+	dev->fw_ver = (dev->fw_ver & 0xffff00000000ull) |
+		((dev->fw_ver & 0xffff0000ull) >> 16) |
+		((dev->fw_ver & 0x0000ffffull) << 16);
+
+	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+	dev->cmd.max_cmds = 1 << lg;
+
+	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
+		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
+
+	if (mthca_is_memfree(dev)) {
+		MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
+		MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
+		MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
+		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
+		mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);
+
+		/*
+		 * Arbel page size is always 4 KB; round up number of
+		 * system pages needed.
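+		 *
+		 * For example, with PAGE_SHIFT = 13 (8 KB system pages) a
+		 * firmware request for 9 4 KB pages becomes
+		 * (9 + 2 - 1) >> 1 = 5 system pages; with 4 KB system pages
+		 * the expression is an identity.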
+ */ + dev->fw.arbel.fw_pages = + (dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> + (PAGE_SHIFT - 12); + + mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n", + (unsigned long long) dev->fw.arbel.clr_int_base, + (unsigned long long) dev->fw.arbel.eq_arm_base, + (unsigned long long) dev->fw.arbel.eq_set_ci_base); + } else { + MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET); + MTHCA_GET(dev->fw.tavor.fw_end, outbox, QUERY_FW_END_OFFSET); + + mthca_dbg(dev, "FW size %d KB (start %llx, end %llx)\n", + (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10), + (unsigned long long) dev->fw.tavor.fw_start, + (unsigned long long) dev->fw.tavor.fw_end); + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) +{ + struct mthca_mailbox *mailbox; + u8 info; + u32 *outbox; + int err = 0; + +#define ENABLE_LAM_OUT_SIZE 0x100 +#define ENABLE_LAM_START_OFFSET 0x00 +#define ENABLE_LAM_END_OFFSET 0x08 +#define ENABLE_LAM_INFO_OFFSET 0x13 + +#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) +#define ENABLE_LAM_INFO_ECC_MASK 0x3 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, + CMD_TIME_CLASS_C, status); + + if (err) + goto out; + + if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE) + goto out; + + MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); + MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET); + MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET); + + if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) != + !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + mthca_info(dev, "FW reports that HCA-attached memory " + "is %s hidden; does not match PCI config\n", + (info & ENABLE_LAM_INFO_HIDDEN_FLAG) ? + "" : "not"); + } + if (info & ENABLE_LAM_INFO_HIDDEN_FLAG) + mthca_dbg(dev, "HCA-attached memory is hidden.\n"); + + mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", + (int) ((dev->ddr_end - dev->ddr_start) >> 10), + (unsigned long long) dev->ddr_start, + (unsigned long long) dev->ddr_end); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); +} + +int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) +{ + struct mthca_mailbox *mailbox; + u8 info; + u32 *outbox; + int err = 0; + +#define QUERY_DDR_OUT_SIZE 0x100 +#define QUERY_DDR_START_OFFSET 0x00 +#define QUERY_DDR_END_OFFSET 0x08 +#define QUERY_DDR_INFO_OFFSET 0x13 + +#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) +#define QUERY_DDR_INFO_ECC_MASK 0x3 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET); + MTHCA_GET(dev->ddr_end, outbox, QUERY_DDR_END_OFFSET); + MTHCA_GET(info, outbox, QUERY_DDR_INFO_OFFSET); + + if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) != + !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + mthca_info(dev, "FW reports that HCA-attached memory " + "is %s hidden; does not match PCI config\n", + (info & QUERY_DDR_INFO_HIDDEN_FLAG) ? 
+ "" : "not"); + } + if (info & QUERY_DDR_INFO_HIDDEN_FLAG) + mthca_dbg(dev, "HCA-attached memory is hidden.\n"); + + mthca_dbg(dev, "HCA memory size %d KB (start %llx, end %llx)\n", + (int) ((dev->ddr_end - dev->ddr_start) >> 10), + (unsigned long long) dev->ddr_start, + (unsigned long long) dev->ddr_end); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, + struct mthca_dev_lim *dev_lim, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *outbox; + u8 field; + u16 size; + int err; + +#define QUERY_DEV_LIM_OUT_SIZE 0x100 +#define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET 0x10 +#define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET 0x11 +#define QUERY_DEV_LIM_RSVD_QP_OFFSET 0x12 +#define QUERY_DEV_LIM_MAX_QP_OFFSET 0x13 +#define QUERY_DEV_LIM_RSVD_SRQ_OFFSET 0x14 +#define QUERY_DEV_LIM_MAX_SRQ_OFFSET 0x15 +#define QUERY_DEV_LIM_RSVD_EEC_OFFSET 0x16 +#define QUERY_DEV_LIM_MAX_EEC_OFFSET 0x17 +#define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET 0x19 +#define QUERY_DEV_LIM_RSVD_CQ_OFFSET 0x1a +#define QUERY_DEV_LIM_MAX_CQ_OFFSET 0x1b +#define QUERY_DEV_LIM_MAX_MPT_OFFSET 0x1d +#define QUERY_DEV_LIM_RSVD_EQ_OFFSET 0x1e +#define QUERY_DEV_LIM_MAX_EQ_OFFSET 0x1f +#define QUERY_DEV_LIM_RSVD_MTT_OFFSET 0x20 +#define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET 0x21 +#define QUERY_DEV_LIM_RSVD_MRW_OFFSET 0x22 +#define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_LIM_MAX_AV_OFFSET 0x27 +#define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET 0x29 +#define QUERY_DEV_LIM_MAX_RES_QP_OFFSET 0x2b +#define QUERY_DEV_LIM_MAX_RDMA_OFFSET 0x2f +#define QUERY_DEV_LIM_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_LIM_ACK_DELAY_OFFSET 0x35 +#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36 +#define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b +#define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f +#define QUERY_DEV_LIM_FLAGS_OFFSET 0x44 +#define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48 +#define QUERY_DEV_LIM_UAR_SZ_OFFSET 0x49 +#define QUERY_DEV_LIM_PAGE_SZ_OFFSET 0x4b +#define QUERY_DEV_LIM_MAX_SG_OFFSET 0x51 +#define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET 0x52 +#define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET 0x55 +#define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET 0x61 +#define QUERY_DEV_LIM_RSVD_MCG_OFFSET 0x62 +#define QUERY_DEV_LIM_MAX_MCG_OFFSET 0x63 +#define QUERY_DEV_LIM_RSVD_PD_OFFSET 0x64 +#define QUERY_DEV_LIM_MAX_PD_OFFSET 0x65 +#define QUERY_DEV_LIM_RSVD_RDD_OFFSET 0x66 +#define QUERY_DEV_LIM_MAX_RDD_OFFSET 0x67 +#define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET 0x80 +#define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET 0x82 +#define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET 0x84 +#define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET 0x86 +#define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET 0x88 +#define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET 0x8a +#define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET 0x8c +#define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET 0x8e +#define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET 0x90 +#define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET 0x92 +#define QUERY_DEV_LIM_PBL_SZ_OFFSET 0x96 +#define QUERY_DEV_LIM_BMME_FLAGS_OFFSET 0x97 +#define QUERY_DEV_LIM_RSVD_LKEY_OFFSET 0x98 +#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f +#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); + dev_lim->max_srq_sz = 1 << field; + MTHCA_GET(field, 
outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); + dev_lim->max_qp_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET); + dev_lim->reserved_qps = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET); + dev_lim->max_qps = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET); + dev_lim->reserved_srqs = 1 << (field >> 4); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET); + dev_lim->max_srqs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET); + dev_lim->reserved_eecs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET); + dev_lim->max_eecs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET); + dev_lim->max_cq_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET); + dev_lim->reserved_cqs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET); + dev_lim->max_cqs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET); + dev_lim->max_mpts = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET); + dev_lim->reserved_eqs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET); + dev_lim->max_eqs = 1 << (field & 0x7); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); + dev_lim->reserved_mtts = 1 << (field >> 4); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); + dev_lim->max_mrw_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET); + dev_lim->reserved_mrws = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET); + dev_lim->max_mtt_seg = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET); + dev_lim->max_requester_per_qp = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET); + dev_lim->max_responder_per_qp = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET); + dev_lim->max_rdma_global = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET); + dev_lim->local_ca_ack_delay = field & 0x1f; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET); + dev_lim->max_mtu = field >> 4; + dev_lim->max_port_width = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET); + dev_lim->max_vl = field >> 4; + dev_lim->num_ports = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET); + dev_lim->max_gids = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET); + dev_lim->max_pkeys = 1 << (field & 0xf); + MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET); + dev_lim->reserved_uars = field >> 4; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET); + dev_lim->uar_size = 1 << ((field & 0x3f) + 20); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET); + dev_lim->min_page_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET); + dev_lim->max_sg = field; + + MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET); + dev_lim->max_desc_sz = size; + + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET); + dev_lim->max_qp_per_mcg = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET); + dev_lim->reserved_mgms = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET); + dev_lim->max_mcgs = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET); + dev_lim->reserved_pds = field >> 4; + 
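+	/*
+	 * MTHCA_GET copies a big-endian field at a fixed byte offset out
+	 * of the command outbox, with the access width following the
+	 * size of the destination variable.  Most limits are reported by
+	 * the firmware as log2 values and expanded on the spot, e.g.
+	 *
+	 *	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
+	 *	dev_lim->max_pds = 1 << (field & 0x3f);
+	 *
+	 * while reserved-resource counts are packed two to a byte and
+	 * unpacked with ">> 4" or "& 0xf".
+	 */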
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET); + dev_lim->max_pds = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET); + dev_lim->reserved_rdds = field >> 4; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET); + dev_lim->max_rdds = 1 << (field & 0x3f); + + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET); + dev_lim->eec_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET); + dev_lim->qpc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET); + dev_lim->eeec_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET); + dev_lim->eqpc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET); + dev_lim->eqc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET); + dev_lim->cqc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET); + dev_lim->srq_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); + dev_lim->uar_scratch_entry_sz = size; + + mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); + mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); + mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); + mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_lim->reserved_mrws, dev_lim->reserved_mtts); + mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); + mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_lim->max_pds, dev_lim->reserved_mgms); + + mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); + + if (mthca_is_memfree(dev)) { + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET); + dev_lim->hca.arbel.resize_srq = field & 1; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET); + dev_lim->max_sg = min(field, dev_lim->max_sg); + MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); + dev_lim->mpt_entry_sz = size; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); + dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f); + MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox, + QUERY_DEV_LIM_BMME_FLAGS_OFFSET); + MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox, + QUERY_DEV_LIM_RSVD_LKEY_OFFSET); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET); + dev_lim->hca.arbel.lam_required = field & 1; + MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox, + QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET); + + if (dev_lim->hca.arbel.bmme_flags & 1) + mthca_dbg(dev, "Base MM extensions: yes " + "(flags %d, max PBL %d, rsvd L_Key %08x)\n", + dev_lim->hca.arbel.bmme_flags, + dev_lim->hca.arbel.max_pbl_sz, + dev_lim->hca.arbel.reserved_lkey); + else + mthca_dbg(dev, "Base MM extensions: no\n"); + + mthca_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20); + } else { + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET); + dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f); + dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +static void get_board_id(u8 *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 
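+/*
+ * A signature word is stored at both ends of the VSD (VSD_OFFSET_SIG1
+ * and VSD_OFFSET_SIG2); get_board_id() trusts the Topspin board-ID
+ * string at VSD_OFFSET_TS_BOARD_ID only when both copies match, and
+ * otherwise falls back to the byte-swapped Mellanox board ID.
+ */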
+#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. + */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } +} + +int mthca_QUERY_ADAPTER(struct mthca_dev *dev, + struct mthca_adapter *adapter, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *outbox; + int err; + +#define QUERY_ADAPTER_OUT_SIZE 0x100 +#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 +#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 +#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 +#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); + MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); + MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); + MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET, + adapter->board_id); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_INIT_HCA(struct mthca_dev *dev, + struct mthca_init_hca_param *param, + u8 *status) +{ + struct mthca_mailbox *mailbox; + __be32 *inbox; + int err; + +#define INIT_HCA_IN_SIZE 0x200 +#define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_QPC_OFFSET 0x020 +#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) +#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) +#define INIT_HCA_EEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x20) +#define INIT_HCA_LOG_EEC_OFFSET (INIT_HCA_QPC_OFFSET + 0x27) +#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) +#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) +#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) +#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_EQPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) +#define INIT_HCA_EEEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) +#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) +#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_RDB_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) +#define INIT_HCA_UDAV_OFFSET 0x0b0 +#define INIT_HCA_UDAV_LKEY_OFFSET (INIT_HCA_UDAV_OFFSET + 0x0) +#define INIT_HCA_UDAV_PD_OFFSET (INIT_HCA_UDAV_OFFSET + 0x4) +#define INIT_HCA_MCAST_OFFSET 0x0c0 +#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +#define INIT_HCA_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_TPT_OFFSET 0x0f0 +#define INIT_HCA_MPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_MTT_SEG_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x09) +#define 
INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) +#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) +#define INIT_HCA_UAR_OFFSET 0x120 +#define INIT_HCA_UAR_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x00) +#define INIT_HCA_UARC_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x09) +#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) +#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) +#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) +#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, INIT_HCA_IN_SIZE); + +#if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); +#elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); +#else +#error Host endianness not defined +#endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* We leave wqe_quota, responder_exu, etc as 0 (default) */ + + /* QPC/EEC/CQC/EQC/RDB attributes */ + + MTHCA_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MTHCA_PUT(inbox, param->eec_base, INIT_HCA_EEC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET); + MTHCA_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MTHCA_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MTHCA_PUT(inbox, param->eqpc_base, INIT_HCA_EQPC_BASE_OFFSET); + MTHCA_PUT(inbox, param->eeec_base, INIT_HCA_EEEC_BASE_OFFSET); + MTHCA_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MTHCA_PUT(inbox, param->rdb_base, INIT_HCA_RDB_BASE_OFFSET); + + /* UD AV attributes */ + + /* multicast attributes */ + + MTHCA_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MTHCA_PUT(inbox, param->mc_hash_sz, INIT_HCA_MC_HASH_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET); + if (!mthca_is_memfree(dev)) + MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + + /* UAR attributes */ + { + u8 uar_page_sz = PAGE_SHIFT - 12; + MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); + } + + MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET); + + if (mthca_is_memfree(dev)) { + MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); + } + + err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status); + + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_INIT_IB(struct mthca_dev *dev, + struct mthca_init_ib_param *param, + int port, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + +#define INIT_IB_IN_SIZE 56 +#define INIT_IB_FLAGS_OFFSET 0x00 +#define INIT_IB_FLAG_SIG (1 << 18) +#define INIT_IB_FLAG_NG (1 << 17) +#define 
INIT_IB_FLAG_G0 (1 << 16) +#define INIT_IB_FLAG_1X (1 << 8) +#define INIT_IB_FLAG_4X (1 << 9) +#define INIT_IB_FLAG_12X (1 << 11) +#define INIT_IB_VL_SHIFT 4 +#define INIT_IB_MTU_SHIFT 12 +#define INIT_IB_MAX_GID_OFFSET 0x06 +#define INIT_IB_MAX_PKEY_OFFSET 0x0a +#define INIT_IB_GUID0_OFFSET 0x10 +#define INIT_IB_NODE_GUID_OFFSET 0x18 +#define INIT_IB_SI_GUID_OFFSET 0x20 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, INIT_IB_IN_SIZE); + + flags = 0; + flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0; + flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0; + flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; + flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; + flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; + flags |= param->vl_cap << INIT_IB_VL_SHIFT; + flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; + MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); + + MTHCA_PUT(inbox, param->gid_cap, INIT_IB_MAX_GID_OFFSET); + MTHCA_PUT(inbox, param->pkey_cap, INIT_IB_MAX_PKEY_OFFSET); + MTHCA_PUT(inbox, param->guid0, INIT_IB_GUID0_OFFSET); + MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); + MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); + + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, + CMD_TIME_CLASS_A, status); + + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) +{ + return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status); +} + +int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) +{ + return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status); +} + +int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, + int port, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags = 0; + +#define SET_IB_IN_SIZE 0x40 +#define SET_IB_FLAGS_OFFSET 0x00 +#define SET_IB_FLAG_SIG (1 << 18) +#define SET_IB_FLAG_RQK (1 << 0) +#define SET_IB_CAP_MASK_OFFSET 0x04 +#define SET_IB_SI_GUID_OFFSET 0x08 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, SET_IB_IN_SIZE); + + flags |= param->set_si_guid ? SET_IB_FLAG_SIG : 0; + flags |= param->reset_qkey_viol ? 
SET_IB_FLAG_RQK : 0;
+	MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
+
+	MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
+	MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
+
+	err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
+			CMD_TIME_CLASS_B, status);
+
+	mthca_free_mailbox(dev, mailbox);
+	return err;
+}
+
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
+}
+
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
+{
+	struct mthca_mailbox *mailbox;
+	__be64 *inbox;
+	int err;
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	inbox = mailbox->buf;
+
+	inbox[0] = cpu_to_be64(virt);
+	inbox[1] = cpu_to_be64(dma_addr);
+
+	err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
+			CMD_TIME_CLASS_B, status);
+
+	mthca_free_mailbox(dev, mailbox);
+
+	if (!err)
+		mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
+			  (unsigned long long) dma_addr, (unsigned long long) virt);
+
+	return err;
+}
+
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
+{
+	mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
+		  page_count, (unsigned long long) virt);
+
+	return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+{
+	return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status);
+}
+
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
+		       u8 *status)
+{
+	int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
+				CMD_TIME_CLASS_A, status);
+
+	if (ret || *status)
+		return ret;
+
+	/*
+	 * Arbel page size is always 4 KB; round up number of system
+	 * pages needed.
+	 */
+	*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+
+	return 0;
+}
+
+int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int mpt_index, u8 *status)
+{
+	return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
+			 CMD_TIME_CLASS_B, status);
+}
+
+int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int mpt_index, u8 *status)
+{
+	return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+			     !mailbox, CMD_HW2SW_MPT,
+			     CMD_TIME_CLASS_B, status);
+}
+
+int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int num_mtt, u8 *status)
+{
+	return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
+			 CMD_TIME_CLASS_B, status);
+}
+
+int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
+}
+
+int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
+		 int eq_num, u8 *status)
+{
+	mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
+		  unmap ?
"Clearing" : "Setting", + (unsigned long long) event_mask, eq_num); + return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, + 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); +} + +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, + CMD_HW2SW_EQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, + CMD_HW2SW_CQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, + CMD_HW2SW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) +{ + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, + CMD_TIME_CLASS_B, status); +} + +int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, + u8 *status) +{ + enum { + MTHCA_TRANS_INVALID = 0, + MTHCA_TRANS_RST2INIT, + MTHCA_TRANS_INIT2INIT, + MTHCA_TRANS_INIT2RTR, + MTHCA_TRANS_RTR2RTS, + MTHCA_TRANS_RTS2RTS, + MTHCA_TRANS_SQERR2RTS, + MTHCA_TRANS_ANY2ERR, + MTHCA_TRANS_RTS2SQD, + MTHCA_TRANS_SQD2SQD, + MTHCA_TRANS_SQD2RTS, + MTHCA_TRANS_ANY2RST, + }; + static const u16 op[] = { + 0, /* MTHCA_TRANS_INVALID */ + CMD_RST2INIT_QPEE, /* MTHCA_TRANS_RST2INIT */ + CMD_INIT2INIT_QPEE, /* MTHCA_TRANS_INIT2INIT */ + CMD_INIT2RTR_QPEE, /* MTHCA_TRANS_INIT2RTR */ + CMD_RTR2RTS_QPEE, /* MTHCA_TRANS_RTR2RTS */ + CMD_RTS2RTS_QPEE, /* MTHCA_TRANS_RTS2RTS */ + CMD_SQERR2RTS_QPEE, /* MTHCA_TRANS_SQERR2RTS */ + CMD_2ERR_QPEE, /* MTHCA_TRANS_ANY2ERR */ + CMD_RTS2SQD_QPEE, /* MTHCA_TRANS_RTS2SQD */ + CMD_SQD2SQD_QPEE, /* MTHCA_TRANS_SQD2SQD */ + CMD_SQD2RTS_QPEE, /* MTHCA_TRANS_SQD2RTS */ + CMD_ERR2RST_QPEE /* MTHCA_TRANS_ANY2RST */ + }; + u8 op_mod = 0; + int my_mailbox = 0; + int err; + + if (trans < 0 || trans >= ARRAY_SIZE(op)) + return -EINVAL; + + if (trans == MTHCA_TRANS_ANY2RST) { + op_mod = 3; /* don't write outbox, any->reset */ + + /* For debugging */ + if (!mailbox) { + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (!IS_ERR(mailbox)) { + my_mailbox = 1; + op_mod = 2; /* write outbox, any->reset */ + } else + mailbox = NULL; + } + } else { + if (0) { + int i; + mthca_dbg(dev, "Dumping QP context:\n"); + printk(" opt param mask: %08x\n", be32_to_cpup((__be32 *)mailbox->buf)); + for (i = 0; i < 0x100 / 4; ++i) { + if (i % 8 == 0) + printk(" [%02x] ", i * 4); + printk(" %08x", + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); + if ((i + 1) % 8 == 0) + printk("\n"); + } + } + } + + if (trans == MTHCA_TRANS_ANY2RST) { + err = mthca_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, + (!!is_ee << 24) | num, op_mod, + op[trans], CMD_TIME_CLASS_C, status); + + if (0 && mailbox) { + int i; + mthca_dbg(dev, "Dumping QP context:\n"); + printk(" %08x\n", be32_to_cpup((__be32 *)mailbox->buf)); + for (i = 0; i < 0x100 / 4; ++i) { + if (i % 8 == 0) + printk("[%02x] ", i * 4); + printk(" %08x", + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); + if ((i + 1) % 8 == 0) + printk("\n"); + } + } + + } else + err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num, + op_mod, op[trans], CMD_TIME_CLASS_C, status); + + if (my_mailbox) + mthca_free_mailbox(dev, mailbox); + + return err; +} + +int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, + struct mthca_mailbox *mailbox, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, + CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); +} + +int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, + u8 *status) +{ + u8 op_mod; + + switch (type) { + case IB_QPT_QP0: + op_mod = 0; + break; + case IB_QPT_QP1: + op_mod = 1; + break; + case IB_QPT_RAW_IPV6: + op_mod = 2; + break; + case IB_QPT_RAW_ETHER: + op_mod = 3; + break; + default: + return -EINVAL; + } + + return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, + CMD_TIME_CLASS_B, status); +} + +int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad, u8 *status) +{ + struct mthca_mailbox *inmailbox, *outmailbox; + u8 *inbox; + int err; + u32 in_modifier = port; + u8 op_modifier = 0; + +#define MAD_IFC_BOX_SIZE 0x400 +#define MAD_IFC_MY_QPN_OFFSET 0x100 +#define MAD_IFC_RQPN_OFFSET 0x104 +#define MAD_IFC_SL_OFFSET 0x108 +#define MAD_IFC_G_PATH_OFFSET 0x109 +#define MAD_IFC_RLID_OFFSET 0x10a +#define MAD_IFC_PKEY_OFFSET 0x10e +#define MAD_IFC_GRH_OFFSET 0x140 + + inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + inbox = inmailbox->buf; + + outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(outmailbox)) { + mthca_free_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + memcpy(inbox, in_mad, 256); + + /* + * Key check traps can't be generated unless we have in_wc to + * tell us where to send the trap. + */ + if (ignore_mkey || !in_wc) + op_modifier |= 0x1; + if (ignore_bkey || !in_wc) + op_modifier |= 0x2; + + if (in_wc) { + u8 val; + + RtlZeroMemory(inbox + 256, 256); + + MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); + MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); + + val = in_wc->sl << 4; + MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET); + + val = in_wc->dlid_path_bits | + (in_wc->wc_flags & IB_WC_GRH ? 
0x80 : 0);
+		MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
+
+		MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
+		MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
+
+		if (in_grh)
+			memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
+
+		op_modifier |= 0x10;
+
+		in_modifier |= in_wc->slid << 16;
+	}
+
+	err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
+			    in_modifier, op_modifier,
+			    CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
+
+	if (!err && !*status)
+		memcpy(response_mad, outmailbox->buf, 256);
+
+	mthca_free_mailbox(dev, inmailbox);
+	mthca_free_mailbox(dev, outmailbox);
+	return err;
+}
+
+int mthca_READ_MGM(struct mthca_dev *dev, int index,
+		   struct mthca_mailbox *mailbox, u8 *status)
+{
+	return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
+			     CMD_READ_MGM, CMD_TIME_CLASS_A, status);
+}
+
+int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
+		    struct mthca_mailbox *mailbox, u8 *status)
+{
+	return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
+			 CMD_TIME_CLASS_A, status);
+}
+
+int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    u16 *hash, u8 *status)
+{
+	u64 imm;
+	int err;
+
+	err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
+			    CMD_TIME_CLASS_A, status);
+
+	*hash = (u16)imm;
+	return err;
+}
+
+int mthca_NOP(struct mthca_dev *dev, u8 *status)
+{
+	return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status); /* 100 msecs */
+}
diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_cmd.h b/branches/MTHCA/hw/mthca/kernel/mthca_cmd.h
new file mode 100644
index 00000000..f79232a7
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/kernel/mthca_cmd.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_cmd.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_CMD_H
+#define MTHCA_CMD_H
+
+#include
+
+#define MTHCA_MAILBOX_SIZE 4096
+
+enum {
+	/* command completed successfully: */
+	MTHCA_CMD_STAT_OK             = 0x00,
+	/* Internal error (such as a bus error) occurred while processing command: */
+	MTHCA_CMD_STAT_INTERNAL_ERR   = 0x01,
+	/* Operation/command not supported or opcode modifier not supported: */
+	MTHCA_CMD_STAT_BAD_OP         = 0x02,
+	/* Parameter not supported or parameter out of range: */
+	MTHCA_CMD_STAT_BAD_PARAM      = 0x03,
+	/* System not enabled or bad system state: */
+	MTHCA_CMD_STAT_BAD_SYS_STATE  = 0x04,
+	/* Attempt to access reserved or unallocated resource: */
+	MTHCA_CMD_STAT_BAD_RESOURCE   = 0x05,
+	/* Requested resource is currently executing a command, or is otherwise busy: */
+	MTHCA_CMD_STAT_RESOURCE_BUSY  = 0x06,
+	/* memory error: */
+	MTHCA_CMD_STAT_DDR_MEM_ERR    = 0x07,
+	/* Required capability exceeds device limits: */
+	MTHCA_CMD_STAT_EXCEED_LIM     = 0x08,
+	/* Resource is not in the appropriate state or ownership: */
+	MTHCA_CMD_STAT_BAD_RES_STATE  = 0x09,
+	/* Index out of range: */
+	MTHCA_CMD_STAT_BAD_INDEX      = 0x0a,
+	/* FW image corrupted: */
+	MTHCA_CMD_STAT_BAD_NVMEM      = 0x0b,
+	/* Attempt to modify a QP/EE which is not in the presumed state: */
+	MTHCA_CMD_STAT_BAD_QPEE_STATE = 0x10,
+	/* Bad segment parameters (Address/Size): */
+	MTHCA_CMD_STAT_BAD_SEG_PARAM  = 0x20,
+	/* Memory Region has Memory Windows bound to it: */
+	MTHCA_CMD_STAT_REG_BOUND      = 0x21,
+	/* HCA local attached memory not present: */
+	MTHCA_CMD_STAT_LAM_NOT_PRE    = 0x22,
+	/* Bad management packet (silently discarded): */
+	MTHCA_CMD_STAT_BAD_PKT        = 0x30,
+	/* More outstanding CQEs in CQ than new CQ size: */
+	MTHCA_CMD_STAT_BAD_SIZE       = 0x40
+};
+
+enum {
+	MTHCA_TRANS_INVALID = 0,
+	MTHCA_TRANS_RST2INIT,
+	MTHCA_TRANS_INIT2INIT,
+	MTHCA_TRANS_INIT2RTR,
+	MTHCA_TRANS_RTR2RTS,
+	MTHCA_TRANS_RTS2RTS,
+	MTHCA_TRANS_SQERR2RTS,
+	MTHCA_TRANS_ANY2ERR,
+	MTHCA_TRANS_RTS2SQD,
+	MTHCA_TRANS_SQD2SQD,
+	MTHCA_TRANS_SQD2RTS,
+	MTHCA_TRANS_ANY2RST,
+};
+
+enum {
+	DEV_LIM_FLAG_RC                 = 1 << 0,
+	DEV_LIM_FLAG_UC                 = 1 << 1,
+	DEV_LIM_FLAG_UD                 = 1 << 2,
+	DEV_LIM_FLAG_RD                 = 1 << 3,
+	DEV_LIM_FLAG_RAW_IPV6           = 1 << 4,
+	DEV_LIM_FLAG_RAW_ETHER          = 1 << 5,
+	DEV_LIM_FLAG_SRQ                = 1 << 6,
+	DEV_LIM_FLAG_BAD_PKEY_CNTR      = 1 << 8,
+	DEV_LIM_FLAG_BAD_QKEY_CNTR      = 1 << 9,
+	DEV_LIM_FLAG_MW                 = 1 << 16,
+	DEV_LIM_FLAG_AUTO_PATH_MIG      = 1 << 17,
+	DEV_LIM_FLAG_ATOMIC             = 1 << 18,
+	DEV_LIM_FLAG_RAW_MULTI          = 1 << 19,
+	DEV_LIM_FLAG_UD_AV_PORT_ENFORCE = 1 << 20,
+	DEV_LIM_FLAG_UD_MULTI           = 1 << 21,
+};
+
+struct mthca_mailbox {
+	dma_addr_t dma;
+	void      *buf;
+};
+
+struct mthca_dev_lim {
+	int max_srq_sz;
+	int max_qp_sz;
+	int reserved_qps;
+	int max_qps;
+	int reserved_srqs;
+	int max_srqs;
+	int reserved_eecs;
+	int max_eecs;
+	int max_cq_sz;
+	int reserved_cqs;
+	int max_cqs;
+	int max_mpts;
+	int reserved_eqs;
+	int max_eqs;
+	int reserved_mtts;
+	int max_mrw_sz;
+	int reserved_mrws;
+	int max_mtt_seg;
+	int max_requester_per_qp;
+	int max_responder_per_qp;
+	int max_rdma_global;
+	int local_ca_ack_delay;
+	int max_mtu;
+	int max_port_width;
+	int max_vl;
+	int num_ports;
+	int max_gids;
+	int max_pkeys;
+	u32 flags;
+	int reserved_uars;
+	int uar_size;
+	int min_page_sz;
+	int max_sg;
+	int max_desc_sz;
+	int max_qp_per_mcg;
+	int reserved_mgms;
+	int max_mcgs;
+	int reserved_pds;
+	int max_pds;
+	int reserved_rdds;
+	int max_rdds;
+	int eec_entry_sz;
+	int qpc_entry_sz;
+	int eeec_entry_sz;
+	int eqpc_entry_sz;
+	int eqc_entry_sz;
int cqc_entry_sz; + int srq_entry_sz; + int uar_scratch_entry_sz; + int mpt_entry_sz; + union { + struct { + int max_avs; + } tavor; + struct { + int resize_srq; + int max_pbl_sz; + u8 bmme_flags; + u32 reserved_lkey; + int lam_required; + u64 max_icm_sz; + } arbel; + } hca; +}; + +struct mthca_adapter { + u32 vendor_id; + u32 device_id; + u32 revision_id; + char board_id[MTHCA_BOARD_ID_LEN]; + u8 inta_pin; +}; + +struct mthca_init_hca_param { + u64 qpc_base; + u64 eec_base; + u64 srqc_base; + u64 cqc_base; + u64 eqpc_base; + u64 eeec_base; + u64 eqc_base; + u64 rdb_base; + u64 mc_base; + u64 mpt_base; + u64 mtt_base; + u64 uar_scratch_base; + u64 uarc_base; + u16 log_mc_entry_sz; + u16 mc_hash_sz; + u8 log_num_qps; + u8 log_num_eecs; + u8 log_num_srqs; + u8 log_num_cqs; + u8 log_num_eqs; + u8 log_mc_table_sz; + u8 mtt_seg_sz; + u8 log_mpt_sz; + u8 log_uar_sz; + u8 log_uarc_sz; +}; + +struct mthca_init_ib_param { + int enable_1x; + int enable_4x; + int vl_cap; + int mtu_cap; + u16 gid_cap; + u16 pkey_cap; + int set_guid0; + u64 guid0; + int set_node_guid; + u64 node_guid; + int set_si_guid; + u64 si_guid; +}; + +struct mthca_set_ib_param { + int set_si_guid; + int reset_qkey_viol; + u64 si_guid; + u32 cap_mask; +}; + +int mthca_cmd_init(struct mthca_dev *dev); +void mthca_cmd_cleanup(struct mthca_dev *dev); +int mthca_cmd_use_events(struct mthca_dev *dev); +void mthca_cmd_use_polling(struct mthca_dev *dev); +void mthca_cmd_event(struct mthca_dev *dev, u16 token, + u8 status, u64 out_param); + +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask); +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); + +int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); +int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); +int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); +int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status); +int mthca_RUN_FW(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status); +int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status); +int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, + struct mthca_dev_lim *dev_lim, u8 *status); +int mthca_QUERY_ADAPTER(struct mthca_dev *dev, + struct mthca_adapter *adapter, u8 *status); +int mthca_INIT_HCA(struct mthca_dev *dev, + struct mthca_init_hca_param *param, + u8 *status); +int mthca_INIT_IB(struct mthca_dev *dev, + struct mthca_init_ib_param *param, + int port, u8 *status); +int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status); +int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status); +int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, + int port, u8 *status); +int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status); +int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status); +int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status); +int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); +int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); +int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, + u8 *status); +int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status); +int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status); +int 
mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int num_mtt, u8 *status); +int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); +int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, + int eq_num, u8 *status); +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status); +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status); +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status); +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status); +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); +int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, + u8 *status); +int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, + u8 *status); +int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad, u8 *status); +int mthca_READ_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_WRITE_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + u16 *hash, u8 *status); +int mthca_NOP(struct mthca_dev *dev, u8 *status); + +#endif /* MTHCA_CMD_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_config_reg.h b/branches/MTHCA/hw/mthca/kernel/mthca_config_reg.h new file mode 100644 index 00000000..d12084a8 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_config_reg.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_config_reg.h 2803 2005-07-05 15:58:55Z roland $ + */ + +#ifndef MTHCA_CONFIG_REG_H +#define MTHCA_CONFIG_REG_H + +#define MTHCA_HCR_BASE 0x80680 +#define MTHCA_HCR_SIZE 0x0001c +#define MTHCA_ECR_BASE 0x80700 +#define MTHCA_ECR_SIZE 0x00008 +#define MTHCA_ECR_CLR_BASE 0x80708 +#define MTHCA_ECR_CLR_SIZE 0x00008 +#define MTHCA_MAP_ECR_SIZE (MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE) +#define MTHCA_CLR_INT_BASE 0xf00d8 +#define MTHCA_CLR_INT_SIZE 0x00008 +#define MTHCA_EQ_SET_CI_SIZE (8 * 32) + +#endif /* MTHCA_CONFIG_REG_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_cq.c b/branches/MTHCA/hw/mthca/kernel/mthca_cq.c new file mode 100644 index 00000000..42593cb9 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_cq.c @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_cq.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +enum { + MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE +}; + +enum { + MTHCA_CQ_ENTRY_SIZE = 0x20 +}; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. 
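+ * Without packing, the compiler would insert four bytes of padding
+ * before "start" to give the 64-bit field natural alignment, pushing
+ * every later field off its hardware-defined offset; hence the MSVC
+ * #pragma pack(push,1) below in addition to the GCC-style
+ * __attribute__((packed)).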
+ */ +#pragma pack(push,1) +struct mthca_cq_context { + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 error_eqn; /* Tavor only */ + __be32 comp_eqn; + __be32 pd; + __be32 lkey; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + __be32 cqn; + __be32 ci_db; /* Arbel only */ + __be32 state_db; /* Arbel only */ + u32 reserved; +} __attribute__((packed)); +#pragma pack(pop) + +#define MTHCA_CQ_STATUS_OK ( 0 << 28) +#define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28) +#define MTHCA_CQ_STATUS_WRITE_FAIL (10 << 28) +#define MTHCA_CQ_FLAG_TR ( 1 << 18) +#define MTHCA_CQ_FLAG_OI ( 1 << 17) +#define MTHCA_CQ_STATE_DISARMED ( 0 << 8) +#define MTHCA_CQ_STATE_ARMED ( 1 << 8) +#define MTHCA_CQ_STATE_ARMED_SOL ( 4 << 8) +#define MTHCA_EQ_STATE_FIRED (10 << 8) + +enum { + MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe +}; + +enum { + SYNDROME_LOCAL_LENGTH_ERR = 0x01, + SYNDROME_LOCAL_QP_OP_ERR = 0x02, + SYNDROME_LOCAL_EEC_OP_ERR = 0x03, + SYNDROME_LOCAL_PROT_ERR = 0x04, + SYNDROME_WR_FLUSH_ERR = 0x05, + SYNDROME_MW_BIND_ERR = 0x06, + SYNDROME_BAD_RESP_ERR = 0x10, + SYNDROME_LOCAL_ACCESS_ERR = 0x11, + SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + SYNDROME_REMOTE_ACCESS_ERR = 0x13, + SYNDROME_REMOTE_OP_ERR = 0x14, + SYNDROME_RETRY_EXC_ERR = 0x15, + SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + SYNDROME_LOCAL_RDD_VIOL_ERR = 0x20, + SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21, + SYNDROME_REMOTE_ABORTED_ERR = 0x22, + SYNDROME_INVAL_EECN_ERR = 0x23, + SYNDROME_INVAL_EEC_STATE_ERR = 0x24 +}; + +struct mthca_cqe { + __be32 my_qpn; + __be32 my_ee; + __be32 rqpn; + __be16 sl_g_mlpath; + __be16 rlid; + __be32 imm_etype_pkey_eec; + __be32 byte_cnt; + __be32 wqe; + u8 opcode; + u8 is_send; + u8 reserved; + u8 owner; +}; + +struct mthca_err_cqe { + __be32 my_qpn; + u32 reserved1[3]; + u8 syndrome; + u8 reserved2; + __be16 db_cnt; + u32 reserved3; + __be32 wqe; + u8 opcode; + u8 reserved4[2]; + u8 owner; +}; + +#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) +#define MTHCA_CQ_ENTRY_OWNER_HW (1 << 7) + +#define MTHCA_TAVOR_CQ_DB_INC_CI (1 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL (3 << 24) +#define MTHCA_TAVOR_CQ_DB_SET_CI (4 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24) + +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL (1 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24) + +static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) +{ + if (cq->is_direct) + return (struct mthca_cqe *)(cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE)); + else + return (struct mthca_cqe *)(cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf + + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE); +} + +static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i) +{ + struct mthca_cqe *cqe = get_cqe(cq, i); + return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; +} + +static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) +{ + return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe); +} + +static inline void set_cqe_hw(struct mthca_cqe *cqe) +{ + cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; +} + +static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr) +{ + __be32 *cqe = cqe_ptr; + + (void) cqe; /* avoid warning if mthca_dbg compiled away... 
*/ + mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", + be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), + be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), + be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); +} + +/* + * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index + * should be correct before calling update_cons_index(). + */ +static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, + int incr) +{ + __be32 doorbell[2]; + + if (mthca_is_memfree(dev)) { + *cq->set_ci_db = cpu_to_be32(cq->cons_index); + wmb(); + } else { + doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn); + doorbell[1] = cpu_to_be32(incr - 1); + + mthca_write64(doorbell, + dev->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } +} + +void mthca_cq_completion(struct mthca_dev *dev, u32 cqn) +{ + struct mthca_cq *cq; + + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + + if (!cq) { + mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +void mthca_cq_event(struct mthca_dev *dev, u32 cqn, + enum ib_event_type event_type) +{ + struct mthca_cq *cq; + struct ib_event event; + + spin_lock(&dev->cq_table.lock); + + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + + if (cq) + atomic_inc(&cq->refcount); + spin_unlock(&dev->cq_table.lock); + + if (!cq) { + mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.cq = &cq->ibcq; + if (cq->ibcq.event_handler) + cq->ibcq.event_handler(&event, cq->ibcq.cq_context); + + if (atomic_dec_and_test(&cq->refcount)) + wake_up(&cq->wait); +} + +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq) +{ + struct mthca_cq *cq; + struct mthca_cqe *cqe; + u32 prod_index; + int nfreed = 0; + + spin_lock_irq(&dev->cq_table.lock); + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + spin_unlock_irq(&dev->cq_table.lock); + + if (!cq) + return; + + spin_lock_irq(&cq->lock); + + /* + * First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->cons_index; + cqe_sw(cq, prod_index & cq->ibcq.cqe); + ++prod_index) + if (prod_index == cq->cons_index + cq->ibcq.cqe) + break; + + if (0) + mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", + qpn, cqn, cq->cons_index, prod_index); + + /* + * Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. 
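+	 * Each CQE that belongs to the QP being cleaned is dropped;
+	 * every surviving entry is slid forward by the number of
+	 * entries freed so far (nfreed), so the queue is compacted in
+	 * place and cons_index can simply advance by nfreed afterwards.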
+ */ + while (prod_index > cq->cons_index) { + cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); + if (cqe->my_qpn == cpu_to_be32(qpn)) { + if (srq) + mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); + ++nfreed; + } + else if (nfreed) + memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & + cq->ibcq.cqe), + cqe, + MTHCA_CQ_ENTRY_SIZE); + --prod_index; + } + + if (nfreed) { + wmb(); + cq->cons_index += nfreed; + update_cons_index(dev, cq, nfreed); + } + + spin_unlock_irq(&cq->lock); + if (atomic_dec_and_test(&cq->refcount)) + wake_up(&cq->wait); +} + +static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, + struct mthca_qp *qp, int wqe_index, int is_send, + struct mthca_err_cqe *cqe, + struct ib_wc *entry, int *free_cqe) +{ + int err; + int dbd; + __be32 new_wqe; + + if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { + mthca_dbg(dev, "local QP operation err " + "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n", + be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), + cq->cqn, cq->cons_index); + dump_cqe(dev, cqe); + } + + /* + * For completions in error, only work request ID, status (and + * freed resource count for RD) have to be set. + */ + switch (cqe->syndrome) { + case SYNDROME_LOCAL_LENGTH_ERR: + entry->status = IB_WC_LOC_LEN_ERR; + break; + case SYNDROME_LOCAL_QP_OP_ERR: + entry->status = IB_WC_LOC_QP_OP_ERR; + break; + case SYNDROME_LOCAL_EEC_OP_ERR: + entry->status = IB_WC_LOC_EEC_OP_ERR; + break; + case SYNDROME_LOCAL_PROT_ERR: + entry->status = IB_WC_LOC_PROT_ERR; + break; + case SYNDROME_WR_FLUSH_ERR: + entry->status = IB_WC_WR_FLUSH_ERR; + break; + case SYNDROME_MW_BIND_ERR: + entry->status = IB_WC_MW_BIND_ERR; + break; + case SYNDROME_BAD_RESP_ERR: + entry->status = IB_WC_BAD_RESP_ERR; + break; + case SYNDROME_LOCAL_ACCESS_ERR: + entry->status = IB_WC_LOC_ACCESS_ERR; + break; + case SYNDROME_REMOTE_INVAL_REQ_ERR: + entry->status = IB_WC_REM_INV_REQ_ERR; + break; + case SYNDROME_REMOTE_ACCESS_ERR: + entry->status = IB_WC_REM_ACCESS_ERR; + break; + case SYNDROME_REMOTE_OP_ERR: + entry->status = IB_WC_REM_OP_ERR; + break; + case SYNDROME_RETRY_EXC_ERR: + entry->status = IB_WC_RETRY_EXC_ERR; + break; + case SYNDROME_RNR_RETRY_EXC_ERR: + entry->status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case SYNDROME_LOCAL_RDD_VIOL_ERR: + entry->status = IB_WC_LOC_RDD_VIOL_ERR; + break; + case SYNDROME_REMOTE_INVAL_RD_REQ_ERR: + entry->status = IB_WC_REM_INV_RD_REQ_ERR; + break; + case SYNDROME_REMOTE_ABORTED_ERR: + entry->status = IB_WC_REM_ABORT_ERR; + break; + case SYNDROME_INVAL_EECN_ERR: + entry->status = IB_WC_INV_EECN_ERR; + break; + case SYNDROME_INVAL_EEC_STATE_ERR: + entry->status = IB_WC_INV_EEC_STATE_ERR; + break; + default: + entry->status = IB_WC_GENERAL_ERR; + break; + } + + /* + * Mem-free HCAs always generate one CQE per WQE, even in the + * error case, so we don't have to check the doorbell count, etc. + */ + if (mthca_is_memfree(dev)) + return 0; + + err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); + if (err) + return err; + + /* + * If we're at the end of the WQE chain, or we've used up our + * doorbell count, free the CQE. Otherwise just update it for + * the next poll operation. 
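+	 * On Tavor a single WQE may have been rung into the hardware
+	 * more than once (cqe->db_cnt tracks the outstanding count), so
+	 * the CQE is recycled in place as a WR_FLUSH_ERR completion and
+	 * db_cnt is decremented until both the WQE chain and the
+	 * doorbell count are exhausted.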
+ */ + if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) + return 0; + + cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); + cqe->wqe = new_wqe; + cqe->syndrome = SYNDROME_WR_FLUSH_ERR; + + *free_cqe = 0; + + return 0; +} + +static inline int mthca_poll_one(struct mthca_dev *dev, + struct mthca_cq *cq, + struct mthca_qp **cur_qp, + int *freed, + struct ib_wc *entry) +{ + struct mthca_wq *wq; + struct mthca_cqe *cqe; + unsigned wqe_index; + int is_error; + int is_send; + int free_cqe = 1; + int err = 0; + + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + /* + * Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + if (0) { + mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n", + cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), + be32_to_cpu(cqe->wqe)); + dump_cqe(dev, cqe); + } + + is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == + MTHCA_ERROR_CQE_OPCODE_MASK; + is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; + + if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) { + /* + * We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + *cur_qp = mthca_array_get(&dev->qp_table.qp, + be32_to_cpu(cqe->my_qpn) & + (dev->limits.num_qps - 1)); + if (!*cur_qp) { + mthca_warn(dev, "CQ entry for unknown QP %06x\n", + be32_to_cpu(cqe->my_qpn) & 0xffffff); + err = -EINVAL; + goto out; + } + } + + entry->qp_num = (*cur_qp)->qpn; + + if (is_send) { + wq = &(*cur_qp)->sq; + wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) + >> wq->wqe_shift); + entry->wr_id = (*cur_qp)->wrid[wqe_index + + (*cur_qp)->rq.max]; + } else if ((*cur_qp)->ibqp.srq) { + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); + u32 wqe = be32_to_cpu(cqe->wqe); + wq = NULL; + wqe_index = wqe >> srq->wqe_shift; + entry->wr_id = srq->wrid[wqe_index]; + mthca_free_srq_wqe(srq, wqe); + } else { + wq = &(*cur_qp)->rq; + wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; + entry->wr_id = (*cur_qp)->wrid[wqe_index]; + } + + if (wq) { + if (wq->last_comp < wqe_index) + wq->tail += wqe_index - wq->last_comp; + else + wq->tail += wqe_index + wq->max - wq->last_comp; + + wq->last_comp = wqe_index; + } + + if (is_error) { + err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, + (struct mthca_err_cqe *) cqe, + entry, &free_cqe); + goto out; + } + + if (is_send) { + entry->wc_flags = 0; + switch (cqe->opcode) { + case MTHCA_OPCODE_RDMA_WRITE: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case MTHCA_OPCODE_RDMA_WRITE_IMM: + entry->opcode = IB_WC_RDMA_WRITE; + entry->wc_flags |= IB_WC_WITH_IMM; + break; + case MTHCA_OPCODE_SEND: + entry->opcode = IB_WC_SEND; + break; + case MTHCA_OPCODE_SEND_IMM: + entry->opcode = IB_WC_SEND; + entry->wc_flags |= IB_WC_WITH_IMM; + break; + case MTHCA_OPCODE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + entry->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MTHCA_OPCODE_ATOMIC_CS: + entry->opcode = IB_WC_COMPARE_SWAP; + entry->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MTHCA_OPCODE_ATOMIC_FA: + entry->opcode = IB_WC_FETCH_ADD; + entry->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MTHCA_OPCODE_BIND_MW: + entry->opcode = IB_WC_MW_BIND; + break; + default: + entry->opcode = MTHCA_OPCODE_INVALID; + break; + } + } else { + entry->byte_len = be32_to_cpu(cqe->byte_cnt); + switch (cqe->opcode & 0x1f) { + case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE: + case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE: + 
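+			/*
+			 * The low five bits of a receive CQE opcode hold the
+			 * IBA transport opcode of the incoming packet; only
+			 * the *_WITH_IMMEDIATE variants carry immediate data.
+			 */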
entry->wc_flags = IB_WC_WITH_IMM; + entry->imm_data = cqe->imm_etype_pkey_eec; + entry->opcode = IB_WC_RECV; + break; + case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: + case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: + entry->wc_flags = IB_WC_WITH_IMM; + entry->imm_data = cqe->imm_etype_pkey_eec; + entry->opcode = IB_WC_RECV_RDMA_WRITE; + break; + default: + entry->wc_flags = 0; + entry->opcode = IB_WC_RECV; + break; + } + entry->slid = be16_to_cpu(cqe->rlid); + entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12; + entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; + entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f; + entry->pkey_index = (u16)(be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16); + entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ? + IB_WC_GRH : 0; + } + + entry->status = IB_WC_SUCCESS; + + out: + if (likely(free_cqe)) { + set_cqe_hw(cqe); + ++(*freed); + ++cq->cons_index; + } + + return err; +} + +int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *entry) +{ + struct mthca_dev *dev = to_mdev(ibcq->device); + struct mthca_cq *cq = to_mcq(ibcq); + struct mthca_qp *qp = NULL; + int err = 0; + int freed = 0; + int npolled; + + spin_lock_irqsave(&cq->lock); + + for (npolled = 0; npolled < num_entries; ++npolled) { + err = mthca_poll_one(dev, cq, &qp, + &freed, entry + npolled); + if (err) + break; + } + + if (freed) { + wmb(); + update_cons_index(dev, cq, freed); + } + + spin_unlock_irqrestore(&cq->lock); + + return err == 0 || err == -EAGAIN ? npolled : err; +} + +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) +{ + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? + MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : + MTHCA_TAVOR_CQ_DB_REQ_NOT) | + to_mcq(cq)->cqn); + doorbell[1] = (__force __be32) 0xffffffff; + + mthca_write64(doorbell, + to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); + + return 0; +} + +int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +{ + struct mthca_cq *cq = to_mcq(ibcq); + __be32 doorbell[2]; + u32 sn; + __be32 ci; + + sn = cq->arm_sn & 3; + ci = cpu_to_be32(cq->cons_index); + + doorbell[0] = ci; + doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | + (notify == IB_CQ_SOLICITED ? 1 : 2)); + + mthca_write_db_rec(doorbell, cq->arm_db); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32((sn << 28) | + (notify == IB_CQ_SOLICITED ? 
+ MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : + MTHCA_ARBEL_CQ_DB_REQ_NOT) | + cq->cqn); + doorbell[1] = ci; + + mthca_write64(doorbell, + to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); + + return 0; +} + +static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) +{ + mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, + &cq->queue, cq->is_direct, &cq->mr); +} + +int mthca_init_cq(struct mthca_dev *dev, int nent, + struct mthca_ucontext *ctx, u32 pdn, + struct mthca_cq *cq) +{ + int size = nent * MTHCA_CQ_ENTRY_SIZE; + struct mthca_mailbox *mailbox; + struct mthca_cq_context *cq_context; + int err = -ENOMEM; + u8 status; + int i; + + might_sleep(); + + cq->ibcq.cqe = nent - 1; + cq->is_kernel = !ctx; + + cq->cqn = mthca_alloc(&dev->cq_table.alloc); + if (cq->cqn == -1) + return -ENOMEM; + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); + if (err) + goto err_out; + + if (cq->is_kernel) { + cq->arm_sn = 1; + + err = -ENOMEM; + + cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, + cq->cqn, &cq->set_ci_db); + if (cq->set_ci_db_index < 0) + goto err_out_icm; + + cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, + cq->cqn, &cq->arm_db); + if (cq->arm_db_index < 0) + goto err_out_ci; + } + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_arm; + + cq_context = mailbox->buf; + + if (cq->is_kernel) { + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE, + &cq->queue, &cq->is_direct, + &dev->driver_pd, 1, &cq->mr); + if (err) + goto err_out_mailbox; + + for (i = 0; i < nent; ++i) + set_cqe_hw(get_cqe(cq, i)); + } + + spin_lock_init(&cq->lock); + atomic_set(&cq->refcount, 1); + init_waitqueue_head(&cq->wait); + + RtlZeroMemory(cq_context, sizeof *cq_context); + cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | + MTHCA_CQ_STATE_DISARMED | + MTHCA_CQ_FLAG_TR); + cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); + if (ctx) + cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); + else + cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); + cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); + cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn); + cq_context->pd = cpu_to_be32(pdn); + cq_context->lkey = cpu_to_be32(cq->mr.ibmr.lkey); + cq_context->cqn = cpu_to_be32(cq->cqn); + + if (mthca_is_memfree(dev)) { + cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index); + cq_context->state_db = cpu_to_be32(cq->arm_db_index); + } + + err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); + if (err) { + mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); + goto err_out_free_mr; + } + + if (status) { + mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n", + status); + err = -EINVAL; + goto err_out_free_mr; + } + + spin_lock_irq(&dev->cq_table.lock); + if (mthca_array_set(&dev->cq_table.cq, + cq->cqn & (dev->limits.num_cqs - 1), + cq)) { + spin_unlock_irq(&dev->cq_table.lock); + goto err_out_free_mr; + } + spin_unlock_irq(&dev->cq_table.lock); + + cq->cons_index = 0; + + mthca_free_mailbox(dev, mailbox); + + return 0; + +err_out_free_mr: + if (cq->is_kernel) + mthca_free_cq_buf(dev, cq); + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_arm: + if (cq->is_kernel && mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); + +err_out_ci: + if (cq->is_kernel && mthca_is_memfree(dev)) + 
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); + +err_out_icm: + mthca_table_put(dev, dev->cq_table.table, cq->cqn); + +err_out: + mthca_free(&dev->cq_table.alloc, cq->cqn); + + return err; +} + +void mthca_free_cq(struct mthca_dev *dev, + struct mthca_cq *cq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + + might_sleep(); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + mthca_warn(dev, "No memory for mailbox to free CQ.\n"); + return; + } + + err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status); + if (err) + mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); + else if (status) + mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); + + if (0) { + __be32 *ctx = mailbox->buf; + int j; + + printk(KERN_ERROR "context for CQN %x (cons index %x, next sw %d)\n", + cq->cqn, cq->cons_index, + cq->is_kernel ? !!next_cqe_sw(cq) : 0); + for (j = 0; j < 16; ++j) + printk(KERN_ERROR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j])); + } + + spin_lock_irq(&dev->cq_table.lock); + mthca_array_clear(&dev->cq_table.cq, + cq->cqn & (dev->limits.num_cqs - 1)); + spin_unlock_irq(&dev->cq_table.lock); + + /* wait for all RUNNING DPCs on that EQ to complete */ + { + struct mthca_eq *eq; + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + // wait for DPCs, using this EQ, to complete + spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_COMP].lock); + //TODO: do we need that ? + spin_lock_sync( &dev->eq_table.eq[MTHCA_EQ_ASYNC].lock ); + } + + atomic_dec(&cq->refcount); + wait_event(&cq->wait, !atomic_read(&cq->refcount)); + + if (cq->is_kernel) { + mthca_free_cq_buf(dev, cq); + if (mthca_is_memfree(dev)) { + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); + } + } + + mthca_table_put(dev, dev->cq_table.table, cq->cqn); + mthca_free(&dev->cq_table.alloc, cq->cqn); + mthca_free_mailbox(dev, mailbox); +} + +int __devinit mthca_init_cq_table(struct mthca_dev *dev) +{ + int err; + + spin_lock_init(&dev->cq_table.lock); + + err = mthca_alloc_init(&dev->cq_table.alloc, + dev->limits.num_cqs, + (1 << 24) - 1, + dev->limits.reserved_cqs); + if (err) + return err; + + err = mthca_array_init(&dev->cq_table.cq, + dev->limits.num_cqs); + if (err) + mthca_alloc_cleanup(&dev->cq_table.alloc); + + return err; +} + +void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev) +{ + mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); + mthca_alloc_cleanup(&dev->cq_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_dev.h b/branches/MTHCA/hw/mthca/kernel/mthca_dev.h new file mode 100644 index 00000000..adf0f84e --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_dev.h @@ -0,0 +1,530 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_dev.h 3047 2005-08-10 03:59:35Z roland $ + */ + +#ifndef MTHCA_DEV_H +#define MTHCA_DEV_H + +#include "hca_driver.h" +#include "mthca_provider.h" +#include "mthca_doorbell.h" + +#define DRV_NAME "ib_mthca" +#define PFX DRV_NAME ": " +#define DRV_VERSION "1.30" +#define DRV_RELDATE "Sep 10, 2005" + +enum { + MTHCA_FLAG_DDR_HIDDEN = 1 << 1, + MTHCA_FLAG_SRQ = 1 << 2, + MTHCA_FLAG_MSI = 1 << 3, + MTHCA_FLAG_MSI_X = 1 << 4, + MTHCA_FLAG_NO_LAM = 1 << 5, + MTHCA_FLAG_FMR = 1 << 6, + MTHCA_FLAG_MEMFREE = 1 << 7, + MTHCA_FLAG_PCIE = 1 << 8 +}; + +enum { + MTHCA_MAX_PORTS = 2 +}; + +enum { + MTHCA_BOARD_ID_LEN = 64 +}; + +enum { + MTHCA_EQ_CONTEXT_SIZE = 0x40, + MTHCA_CQ_CONTEXT_SIZE = 0x40, + MTHCA_QP_CONTEXT_SIZE = 0x200, + MTHCA_RDB_ENTRY_SIZE = 0x20, + MTHCA_AV_SIZE = 0x20, + MTHCA_MGM_ENTRY_SIZE = 0x40, + + /* Arbel FW gives us these, but we need them for Tavor */ + MTHCA_MPT_ENTRY_SIZE = 0x40, + MTHCA_MTT_SEG_SIZE = 0x40, +}; + +enum { + MTHCA_EQ_CMD, + MTHCA_EQ_ASYNC, + MTHCA_EQ_COMP, + MTHCA_NUM_EQ +}; + +enum { + MTHCA_OPCODE_NOP = 0x00, + MTHCA_OPCODE_RDMA_WRITE = 0x08, + MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09, + MTHCA_OPCODE_SEND = 0x0a, + MTHCA_OPCODE_SEND_IMM = 0x0b, + MTHCA_OPCODE_RDMA_READ = 0x10, + MTHCA_OPCODE_ATOMIC_CS = 0x11, + MTHCA_OPCODE_ATOMIC_FA = 0x12, + MTHCA_OPCODE_BIND_MW = 0x18, + MTHCA_OPCODE_INVALID = 0xff +}; + +struct mthca_cmd { + struct pci_pool *pool; + int use_events; + KMUTEX hcr_mutex; + KSEMAPHORE poll_sem; + KSEMAPHORE event_sem; + int max_cmds; + spinlock_t context_lock; + int free_head; + struct mthca_cmd_context *context; + u16 token_mask; +}; + +struct mthca_limits { + int num_ports; + int vl_cap; + int mtu_cap; + int gid_table_len; + int pkey_table_len; + int local_ca_ack_delay; + int num_uars; + int max_sg; + int num_qps; + int reserved_qps; + int num_srqs; + int reserved_srqs; + int num_eecs; + int reserved_eecs; + int num_cqs; + int reserved_cqs; + int num_eqs; + int reserved_eqs; + int num_mpts; + int num_mtt_segs; + int fmr_reserved_mtts; + int reserved_mtts; + int reserved_mrws; + int reserved_uars; + int num_mgms; + int num_amgms; + int reserved_mcgs; + int num_pds; + int reserved_pds; +}; + +struct mthca_alloc { + u32 last; + u32 top; + u32 max; + u32 mask; + spinlock_t lock; + unsigned long *table; +}; + 
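+/*
+ * struct mthca_alloc above is a simple spinlock-protected bitmap
+ * allocator for object numbers (QPNs, CQNs, EQNs and so on), where
+ * mthca_alloc() returns -1 once the bitmap is exhausted.  A minimal
+ * usage sketch, mirroring what mthca_init_cq() does -- the names are
+ * real, but the snippet itself is only illustrative:
+ *
+ *	u32 cqn = mthca_alloc(&dev->cq_table.alloc);
+ *	if (cqn == -1)
+ *		return -ENOMEM;
+ *	... use cqn as the CQ number ...
+ *	mthca_free(&dev->cq_table.alloc, cqn);
+ */
+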
+struct mthca_array { + struct { + void **page; + int used; + } *page_list; +}; + +struct mthca_uar_table { + struct mthca_alloc alloc; + u64 uarc_base; + int uarc_size; +}; + +struct mthca_pd_table { + struct mthca_alloc alloc; +}; + +struct mthca_buddy { + unsigned long **bits; + int max_order; + spinlock_t lock; +}; + +struct mthca_mr_table { + struct mthca_alloc mpt_alloc; + struct mthca_buddy mtt_buddy; + struct mthca_buddy *fmr_mtt_buddy; + u64 mtt_base; + u64 mpt_base; + struct mthca_icm_table *mtt_table; + struct mthca_icm_table *mpt_table; + struct { + void __iomem *mpt_base; + SIZE_T mpt_base_size; + void __iomem *mtt_base; + SIZE_T mtt_base_size; + struct mthca_buddy mtt_buddy; + } tavor_fmr; +}; + +struct mthca_eq_table { + struct mthca_alloc alloc; + void __iomem *clr_int; + u32 clr_mask; + u32 arm_mask; + struct mthca_eq eq[MTHCA_NUM_EQ]; + u64 icm_virt; + void *icm_page; + dma_addr_t icm_dma; + int have_irq; + u8 inta_pin; + KLOCK_QUEUE_HANDLE lockh; +}; + +struct mthca_cq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array cq; + struct mthca_icm_table *table; +}; + +struct mthca_srq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array srq; + struct mthca_icm_table *table; +}; + +struct mthca_qp_table { + struct mthca_alloc alloc; + u32 rdb_base; + int rdb_shift; + int sqp_start; + spinlock_t lock; + struct mthca_array qp; + struct mthca_icm_table *qp_table; + struct mthca_icm_table *eqp_table; + struct mthca_icm_table *rdb_table; +}; + +struct mthca_av_table { + struct pci_pool *pool; + int num_ddr_avs; + u64 ddr_av_base; + void __iomem *av_map; + SIZE_T av_map_size; + struct mthca_alloc alloc; +}; + +struct mthca_mcg_table { + KMUTEX mutex; + struct mthca_alloc alloc; + struct mthca_icm_table *table; +}; + +struct mthca_dev { + struct ib_device ib_dev; + hca_dev_ext_t *ext; + + int hca_type; + unsigned long mthca_flags; + unsigned long device_cap_flags; + + u32 rev_id; + char board_id[MTHCA_BOARD_ID_LEN]; + + /* firmware info */ + u64 fw_ver; + union { + struct { + u64 fw_start; + u64 fw_end; + } tavor; + struct { + u64 clr_int_base; + u64 eq_arm_base; + u64 eq_set_ci_base; + struct mthca_icm *fw_icm; + struct mthca_icm *aux_icm; + u16 fw_pages; + } arbel; + } fw; + + u64 ddr_start; + u64 ddr_end; + + MTHCA_DECLARE_DOORBELL_LOCK(doorbell_lock) + KMUTEX cap_mask_mutex; + + u8 __iomem *hcr; + SIZE_T hcr_size; + u8 __iomem *kar; + SIZE_T kar_size; + u8 __iomem *clr_base; + SIZE_T clr_base_size; + union { + struct { + void __iomem *ecr_base; + SIZE_T ecr_base_size; + } tavor; + struct { + void __iomem *eq_arm; + SIZE_T eq_arm_size; + void __iomem *eq_set_ci_base; + SIZE_T eq_set_ci_base_size; + } arbel; + } eq_regs; + + struct mthca_cmd cmd; + struct mthca_limits limits; + + struct mthca_uar_table uar_table; + struct mthca_pd_table pd_table; + struct mthca_mr_table mr_table; + struct mthca_eq_table eq_table; + struct mthca_cq_table cq_table; + struct mthca_srq_table srq_table; + struct mthca_qp_table qp_table; + struct mthca_av_table av_table; + struct mthca_mcg_table mcg_table; + + struct mthca_uar driver_uar; + struct mthca_db_table *db_tab; + struct mthca_pd driver_pd; + struct mthca_mr driver_mr; + + struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2]; + struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; + spinlock_t sm_lock; + u32 state; +}; + +// mthca_dev states +enum { + MTHCA_DEV_UNINITIALIZED, + MTHCA_DEV_INITIALIZED, + MTHCA_DEV_FAILED +}; + + +#define mthca_dbg _mthca_dbg +#define mthca_err _mthca_err +#define mthca_info 
_mthca_info +#define mthca_warn _mthca_warn + +#define MTHCA_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + void *__q = &(dest); \ + switch (sizeof (dest)) { \ + case 1: *(u8 *)__q = *(u8 *) __p; break; \ + case 2: *(u16 *)__q = (u16)be16_to_cpup((u16 *)__p); break; \ + case 4: *(u32 *)__q = (u32)be32_to_cpup((u32 *)__p); break; \ + case 8: *(u64 *)__q = (u64)be64_to_cpup((u64 *)__p); break; \ + default: ASSERT(0); \ + } \ + } while (0) + + +#define MTHCA_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (u8)(source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16((u16)source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32((u32)source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64((u64)source); break; \ + default: ASSERT(0); \ + } \ + } while (0) + +NTSTATUS mthca_reset(struct mthca_dev *mdev); + +u32 mthca_alloc(struct mthca_alloc *alloc); +void mthca_free(struct mthca_alloc *alloc, u32 obj); +int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, + u32 reserved); +void mthca_alloc_cleanup(struct mthca_alloc *alloc); +void *mthca_array_get(struct mthca_array *array, int index); +int mthca_array_set(struct mthca_array *array, int index, void *value); +void mthca_array_clear(struct mthca_array *array, int index); +int mthca_array_init(struct mthca_array *array, int nent); +void mthca_array_cleanup(struct mthca_array *array, int nent); +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr); +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr); + +int mthca_init_uar_table(struct mthca_dev *dev); +int mthca_init_pd_table(struct mthca_dev *dev); +int mthca_init_mr_table(struct mthca_dev *dev); +int mthca_init_eq_table(struct mthca_dev *dev); +int mthca_init_cq_table(struct mthca_dev *dev); +int mthca_init_srq_table(struct mthca_dev *dev); +int mthca_init_qp_table(struct mthca_dev *dev); +int mthca_init_av_table(struct mthca_dev *dev); +int mthca_init_mcg_table(struct mthca_dev *dev); + +void mthca_cleanup_uar_table(struct mthca_dev *dev); +void mthca_cleanup_pd_table(struct mthca_dev *dev); +void mthca_cleanup_mr_table(struct mthca_dev *dev); +void mthca_cleanup_eq_table(struct mthca_dev *dev); +void mthca_cleanup_cq_table(struct mthca_dev *dev); +void mthca_cleanup_srq_table(struct mthca_dev *dev); +void mthca_cleanup_qp_table(struct mthca_dev *dev); +void mthca_cleanup_av_table(struct mthca_dev *dev); +void mthca_cleanup_mcg_table(struct mthca_dev *dev); + +int mthca_register_device(struct mthca_dev *dev); +void mthca_unregister_device(struct mthca_dev *dev); + +int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar); +void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar); + +int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd); +void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd); + +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size); +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt); +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len); +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, u32 access, struct mthca_mr *mr); +int mthca_mr_alloc_notrans(struct mthca_dev *dev, 
u32 pd, + u32 access, struct mthca_mr *mr); +int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + u32 access, struct mthca_mr *mr); +void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr); + +int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, + u32 access, struct mthca_fmr *fmr); +int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova); +void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr); +int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova); +void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr); +int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr); + +int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt); +void mthca_unmap_eq_icm(struct mthca_dev *dev); + +int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *entry); +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_init_cq(struct mthca_dev *dev, int nent, + struct mthca_ucontext *ctx, u32 pdn, + struct mthca_cq *cq); +void mthca_free_cq(struct mthca_dev *dev, + struct mthca_cq *cq); +void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); +void mthca_cq_event(struct mthca_dev *dev, u32 cqn, + enum ib_event_type event_type); +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq); + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + struct ib_srq_attr *attr, struct mthca_srq *srq); +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type); +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); +int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); + +void mthca_qp_event(struct mthca_dev *dev, u32 qpn, + enum ib_event_type event_type); +int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask); +int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, + int index, int *dbd, __be32 *new_wqe); +int mthca_alloc_qp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_qp_type_t type, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + struct mthca_qp *qp); +int mthca_alloc_sqp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + int qpn, + int port, + struct mthca_sqp *sqp); +void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp); +int mthca_create_ah(struct mthca_dev *dev, + struct mthca_pd *pd, + struct ib_ah_attr *ah_attr, + struct mthca_ah *ah); +int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah); +int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah 
*ah, + struct ib_ud_header *header); + +int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); +int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); + +int mthca_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad); +int mthca_create_agents(struct mthca_dev *dev); +void mthca_free_agents(struct mthca_dev *dev); + +static inline struct mthca_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct mthca_dev, ib_dev); +} + +static inline int mthca_is_memfree(struct mthca_dev *dev) +{ + return dev->mthca_flags & MTHCA_FLAG_MEMFREE; +} + +#endif /* MTHCA_DEV_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_doorbell.h b/branches/MTHCA/hw/mthca/kernel/mthca_doorbell.h new file mode 100644 index 00000000..762dd1e0 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_doorbell.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_doorbell.h 2905 2005-07-25 18:26:52Z roland $ + */ + +#define MTHCA_RD_DOORBELL 0x00 +#define MTHCA_SEND_DOORBELL 0x10 +#define MTHCA_RECEIVE_DOORBELL 0x18 +#define MTHCA_CQ_DOORBELL 0x20 +#define MTHCA_EQ_DOORBELL 0x28 + +#if BITS_PER_LONG == 64 +/* + * Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. 
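+ *
+ * The atomicity matters because the HCA consumes a doorbell as one
+ * 64-bit value: if the two 32-bit halves could arrive as separate
+ * PCI writes, the device might act on a torn doorbell.  The 32-bit
+ * fallback further down gets the same guarantee by issuing both
+ * halves under a single spinlock.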
+ */ + +#define MTHCA_DECLARE_DOORBELL_LOCK(name) +#define MTHCA_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MTHCA_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mthca_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writeq((__force u64) val, dest); +} + +static inline void mthca_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *) val, dest); +} + +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) +{ + *(u64 *) db = *(u64 *) val; +} + +#else + +/* + * Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MTHCA_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MTHCA_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MTHCA_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mthca_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writel(((__force u32 *) &val)[0], dest); + __raw_writel(((__force u32 *) &val)[1], (u8*)dest + 4); +} + +static inline void mthca_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + spin_lock_irqsave(doorbell_lock); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], (u8*)dest + 4); + spin_unlock_irqrestore(doorbell_lock); +} + +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) +{ + db[0] = val[0]; + wmb(); + db[1] = val[1]; +} + +#endif diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_eq.c b/branches/MTHCA/hw/mthca/kernel/mthca_eq.c new file mode 100644 index 00000000..a1bed5d2 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_eq.c @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_eq.c 2905 2005-07-25 18:26:52Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_config_reg.h" + +enum { + MTHCA_NUM_ASYNC_EQE = 0x80, + MTHCA_NUM_CMD_EQE = 0x80, + MTHCA_EQ_ENTRY_SIZE = 0x20 +}; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. 
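+ *
+ * Without the packing the compiler would insert 4 bytes of padding
+ * before the 64-bit 'start' field to align it naturally, shifting
+ * every later field by 4 bytes relative to the layout the hardware
+ * expects.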
+ */ +#pragma pack(push,1) +struct mthca_eq_context { + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 tavor_pd; /* reserved for Arbel */ + u8 reserved1[3]; + u8 intr; + __be32 arbel_pd; /* lost_count for Tavor */ + __be32 lkey; + u32 reserved2[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved3[4]; +} __attribute__((packed)); +#pragma pack(pop) + +#define MTHCA_EQ_STATUS_OK ( 0 << 28) +#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28) +#define MTHCA_EQ_STATUS_WRITE_FAIL (10 << 28) +#define MTHCA_EQ_OWNER_SW ( 0 << 24) +#define MTHCA_EQ_OWNER_HW ( 1 << 24) +#define MTHCA_EQ_FLAG_TR ( 1 << 18) +#define MTHCA_EQ_FLAG_OI ( 1 << 17) +#define MTHCA_EQ_STATE_ARMED ( 1 << 8) +#define MTHCA_EQ_STATE_FIRED ( 2 << 8) +#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8) +#define MTHCA_EQ_STATE_ARBEL ( 8 << 8) + +enum { + MTHCA_EVENT_TYPE_COMP = 0x00, + MTHCA_EVENT_TYPE_PATH_MIG = 0x01, + MTHCA_EVENT_TYPE_COMM_EST = 0x02, + MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03, + MTHCA_EVENT_TYPE_SRQ_LAST_WQE = 0x13, + MTHCA_EVENT_TYPE_CQ_ERROR = 0x04, + MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, + MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, + MTHCA_EVENT_TYPE_PORT_CHANGE = 0x09, + MTHCA_EVENT_TYPE_EQ_OVERFLOW = 0x0f, + MTHCA_EVENT_TYPE_ECC_DETECT = 0x0e, + MTHCA_EVENT_TYPE_CMD = 0x0a +}; + +#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG) | \ + (1ULL << MTHCA_EVENT_TYPE_COMM_EST) | \ + (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED) | \ + (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \ + (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT)) +#define MTHCA_SRQ_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE) +#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD) + +#define MTHCA_EQ_DB_INC_CI (1 << 24) +#define MTHCA_EQ_DB_REQ_NOT (2 << 24) +#define MTHCA_EQ_DB_DISARM_CQ (3 << 24) +#define MTHCA_EQ_DB_SET_CI (4 << 24) +#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24) + +#pragma pack(push,1) +struct mthca_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __attribute__((packed)) comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __attribute__((packed)) cmd; + struct { + __be32 qpn; + } __attribute__((packed)) qp; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __attribute__((packed)) cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __attribute__((packed)) port_change; + } event; + u8 reserved3[3]; + u8 owner; +} __attribute__((packed)); +#pragma pack(pop) + +#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7) +#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7) + +static inline u64 async_mask(struct mthca_dev *dev) +{ + return dev->mthca_flags & MTHCA_FLAG_SRQ ? 
+ MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK : + MTHCA_ASYNC_EVENT_MASK; +} + +static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); + doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); + + /* + * This barrier makes sure that all updates to ownership bits + * done by set_eqe_hw() hit memory before the consumer index + * is updated. set_eq_ci() allows the HCA to possibly write + * more EQ entries, and we want to avoid the exceedingly + * unlikely possibility of the HCA writing an entry and then + * having set_eqe_hw() overwrite the owner field. + */ + wmb(); + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); +} + +static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + /* See comment in tavor_set_eq_ci() above. */ + wmb(); + __raw_writel((__force u32) cpu_to_be32(ci), + (u8*)dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + if (mthca_is_memfree(dev)) + arbel_set_eq_ci(dev, eq, ci); + else + tavor_set_eq_ci(dev, eq, ci); +} + +static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) +{ + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); + doorbell[1] = 0; + + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); +} + +static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) +{ + writel(eqn_mask, dev->eq_regs.arbel.eq_arm); +} + +static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) +{ + if (!mthca_is_memfree(dev)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); + doorbell[1] = cpu_to_be32(cqn); + + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } +} + +static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry) +{ + unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; + return (struct mthca_eqe *)(eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE); +} + +static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq) +{ + struct mthca_eqe* eqe; + eqe = get_eqe(eq, eq->cons_index); + return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; +} + +static inline void set_eqe_hw(struct mthca_eqe *eqe) +{ + eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; +} + +static void port_change(struct mthca_dev *dev, int port, int active) +{ + struct ib_event record; + + mthca_dbg(dev, "Port change to %s for port %d\n", + active ? "active" : "down", port); + + record.device = &dev->ib_dev; + record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + record.element.port_num = (u8)port; + // Gen2 ib_core mechanism + ib_dispatch_event(&record); + // our callback + ca_event_handler( &record, &dev->ext->hca.hob ); +} + +static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) +{ + struct mthca_eqe *eqe; + int disarm_cqn; + int eqes_found = 0; + + while ((eqe = next_eqe_sw(eq))) { + int set_ci = 0; + + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
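+ *
+ * Without this read barrier the CPU could load the rest of the EQE
+ * before the ownership check and see stale bytes from before the
+ * HCA finished writing the entry.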
+ */ + rmb(); + + switch (eqe->type) { + case MTHCA_EVENT_TYPE_COMP: + disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; + disarm_cq(dev, eq->eqn, disarm_cqn); + mthca_cq_completion(dev, disarm_cqn); + break; + + case MTHCA_EVENT_TYPE_PATH_MIG: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_PATH_MIG); + break; + + case MTHCA_EVENT_TYPE_COMM_EST: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_COMM_EST); + break; + + case MTHCA_EVENT_TYPE_SQ_DRAINED: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_SQ_DRAINED); + break; + + case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_QP_FATAL); + break; + + case MTHCA_EVENT_TYPE_PATH_MIG_FAILED: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_PATH_MIG_ERR); + break; + + case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_QP_REQ_ERR); + break; + + case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR: + mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, + IB_EVENT_QP_ACCESS_ERR); + break; + + case MTHCA_EVENT_TYPE_CMD: + mthca_cmd_event(dev, + be16_to_cpu(eqe->event.cmd.token), + eqe->event.cmd.status, + be64_to_cpu(eqe->event.cmd.out_param)); + /* + * cmd_event() may add more commands. + * The card will think the queue has overflowed if + * we don't tell it we've been processing events. + */ + set_ci = 1; + break; + + case MTHCA_EVENT_TYPE_PORT_CHANGE: + port_change(dev, + (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3, + eqe->subtype == 0x4); + break; + + case MTHCA_EVENT_TYPE_CQ_ERROR: + mthca_warn(dev, "CQ %s on CQN %06x\n", + eqe->event.cq_err.syndrome == 1 ? + "overrun" : "access violation", + be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); + mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn), + IB_EVENT_CQ_ERR); + break; + + case MTHCA_EVENT_TYPE_EQ_OVERFLOW: + mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); + break; + + case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR: + case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR: + case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR: + case MTHCA_EVENT_TYPE_ECC_DETECT: + default: + mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", + eqe->type, eqe->subtype, eq->eqn); + break; + }; + + set_eqe_hw(eqe); + ++eq->cons_index; + eqes_found = 1; + + if (unlikely(set_ci)) { + /* + * Conditional on hca_type is OK here because + * this is a rare case, not the fast path. + */ + set_eq_ci(dev, eq, eq->cons_index); + set_ci = 0; + } + } + + /* + * Rely on caller to set consumer index so that we don't have + * to test hca_type in our interrupt handling fast path. 
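+ *
+ * Each caller pairs this with its matching flavor: mthca_tavor_dpc()
+ * below calls tavor_set_eq_ci() and mthca_arbel_dpc() calls
+ * arbel_set_eq_ci(), so the Tavor-vs-Arbel branch is taken once per
+ * DPC rather than once per event.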
+ */
+	return eqes_found;
+}
+
+static void mthca_tavor_dpc( PRKDPC dpc,
+	PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+	struct mthca_eq *eq = ctx;
+	struct mthca_dev *dev = eq->dev;
+
+	spin_lock_dpc(&eq->lock);
+
+	/* the 'if' guards against the case where two DPCs were queued
+	 * for the same EQ */
+	if (mthca_eq_int(dev, eq)) {
+		tavor_set_eq_ci(dev, eq, eq->cons_index);
+		tavor_eq_req_not(dev, eq->eqn);
+	}
+
+	spin_unlock_dpc(&eq->lock);
+}
+
+static BOOLEAN mthca_tavor_interrupt(
+	PKINTERRUPT int_obj,
+	PVOID ctx
+	)
+{
+	struct mthca_dev *dev = ctx;
+	u32 ecr;
+	int i;
+
+	if (dev->eq_table.clr_mask)
+		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+	ecr = readl((u8*)dev->eq_regs.tavor.ecr_base + 4);
+	if (ecr) {
+		writel(ecr, (u8*)dev->eq_regs.tavor.ecr_base +
+		       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+
+		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+			if (ecr & dev->eq_table.eq[i].eqn_mask &&
+			    next_eqe_sw(&dev->eq_table.eq[i])) {
+				KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+			}
+		}
+	}
+
+	/* ecr is a 32-bit mask whose set bits may live in the high
+	 * bytes; compare with zero so the cast to an 8-bit BOOLEAN
+	 * cannot truncate them away. */
+	return (BOOLEAN)(ecr != 0);
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
+					       struct pt_regs *regs)
+{
+	struct mthca_eq *eq = eq_ptr;
+	struct mthca_dev *dev = eq->dev;
+
+	mthca_eq_int(dev, eq);
+	tavor_set_eq_ci(dev, eq, eq->cons_index);
+	tavor_eq_req_not(dev, eq->eqn);
+
+	/* MSI-X vectors always belong to us */
+	return IRQ_HANDLED;
+}
+#endif
+
+static void mthca_arbel_dpc( PRKDPC dpc,
+	PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+	struct mthca_eq *eq = ctx;
+	struct mthca_dev *dev = eq->dev;
+
+	spin_lock_dpc(&eq->lock);
+
+	/* the 'if' guards against the case where two DPCs were queued
+	 * for the same EQ */
+	if (mthca_eq_int(dev, eq))
+		arbel_set_eq_ci(dev, eq, eq->cons_index);
+	arbel_eq_req_not(dev, eq->eqn_mask);
+
+	spin_unlock_dpc(&eq->lock);
+}
+
+static BOOLEAN mthca_arbel_interrupt(
+	PKINTERRUPT int_obj,
+	PVOID ctx
+	)
+{
+	struct mthca_dev *dev = ctx;
+	int work = 0;
+	int i;
+
+	if (dev->eq_table.clr_mask)
+		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+	for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+		if (next_eqe_sw( &dev->eq_table.eq[i]) ) {
+			work = 1;
+			KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+		}
+	}
+
+	return (BOOLEAN)work;
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
+					       struct pt_regs *regs)
+{
+	struct mthca_eq *eq = eq_ptr;
+	struct mthca_dev *dev = eq->dev;
+
+	mthca_eq_int(dev, eq);
+	arbel_set_eq_ci(dev, eq, eq->cons_index);
+	arbel_eq_req_not(dev, eq->eqn_mask);
+
+	/* MSI-X vectors always belong to us */
+	return IRQ_HANDLED;
+}
+#endif
+
+static int __devinit mthca_create_eq(struct mthca_dev *dev,
+				     int nent,
+				     u8 intr,
+				     struct mthca_eq *eq)
+{
+	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
+		PAGE_SIZE;
+	u64 *dma_list = NULL;
+	dma_addr_t t;
+	struct mthca_mailbox *mailbox;
+	struct mthca_eq_context *eq_context;
+	int err = -ENOMEM;
+	int i;
+	u8 status;
+
+	/* Make sure EQ size is aligned to a power of 2 size.
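+	 * The loop below rounds nent up to the next power of two --
+	 * e.g. a request for 100 entries yields a 128-entry EQ --
+	 * which is what the (ffs(nent) - 1) << 24 log-size encoding
+	 * further down assumes.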
*/ + for (i = 1; i < nent; i <<= 1) + ; /* nothing */ + nent = i; + + eq->dev = dev; + + eq->page_list = kmalloc(npages * sizeof *eq->page_list, + GFP_KERNEL); + if (!eq->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + eq->page_list[i].buf = NULL; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_out_free; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_free; + eq_context = mailbox->buf; + + for (i = 0; i < npages; ++i) { + eq->page_list[i].buf = dma_alloc_coherent(dev, + PAGE_SIZE, &t, GFP_KERNEL); + if (!eq->page_list[i].buf) + goto err_out_free_pages; + + dma_list[i] = t; + eq->page_list[i].mapping = t; + + RtlZeroMemory(eq->page_list[i].buf, PAGE_SIZE); + } + + for (i = 0; i < nent; ++i) + set_eqe_hw(get_eqe(eq, i)); + + eq->eqn = mthca_alloc(&dev->eq_table.alloc); + if (eq->eqn == -1) + goto err_out_free_pages; + + err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, + dma_list, PAGE_SHIFT, npages, + 0, npages * PAGE_SIZE, + MTHCA_MPT_FLAG_LOCAL_WRITE | + MTHCA_MPT_FLAG_LOCAL_READ, + &eq->mr); + if (err) + goto err_out_free_eq; + + eq->nent = nent; + + RtlZeroMemory(eq_context, sizeof *eq_context); + eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | + MTHCA_EQ_OWNER_HW | + MTHCA_EQ_STATE_ARMED | + MTHCA_EQ_FLAG_TR); + if (mthca_is_memfree(dev)) + eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); + + eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); + if (mthca_is_memfree(dev)) { + eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); + } else { + eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index); + eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num); + } + eq_context->intr = intr; + eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); + + err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); + if (err) { + mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); + goto err_out_free_mr; + } + if (status) { + mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n", + status); + err = -EINVAL; + goto err_out_free_mr; + } + + kfree(dma_list); + mthca_free_mailbox(dev, mailbox); + + eq->eqn_mask = swab32(1 << eq->eqn); + eq->cons_index = 0; + + dev->eq_table.arm_mask |= eq->eqn_mask; + + mthca_dbg(dev, "Allocated EQ %d with %d entries\n", + eq->eqn, nent); + + return err; + + err_out_free_mr: + mthca_free_mr(dev, &eq->mr); + + err_out_free_eq: + mthca_free(&dev->eq_table.alloc, eq->eqn); + + err_out_free_pages: + for (i = 0; i < npages; ++i) { + if (eq->page_list[i].buf) { + dma_free_coherent(dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].mapping); + } + } + mthca_free_mailbox(dev, mailbox); + + err_out_free: + kfree(eq->page_list); + kfree(dma_list); + + err_out: + return err; +} + +static void mthca_free_eq(struct mthca_dev *dev, + struct mthca_eq *eq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / + PAGE_SIZE; + int i; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return; + + err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status); + if (err) + mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); + if (status) + mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status); + + dev->eq_table.arm_mask &= ~eq->eqn_mask; + + if (0) { + mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); + for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { + if (i % 4 == 0) + printk("[%02x] ", i * 4); + printk(" %08x", be32_to_cpup((u8*)mailbox->buf + 
i * 4)); + if ((i + 1) % 4 == 0) + printk("\n"); + } + } + + mthca_free_mr(dev, &eq->mr); + for (i = 0; i < npages; ++i) { + pci_free_consistent(dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].mapping); + } + + kfree(eq->page_list); + mthca_free_mailbox(dev, mailbox); +} + +static void mthca_free_irqs(struct mthca_dev *dev) +{ + int i; + + if (dev->eq_table.have_irq) + free_irq(dev->ext->int_obj); +#ifdef MSI_SUPPORT + for (i = 0; i < MTHCA_NUM_EQ; ++i) + if (dev->eq_table.eq[i].have_irq) + free_irq(dev->eq_table.eq[i].msi_x_vector, + dev->eq_table.eq + i); +#endif +} + +static int __devinit mthca_map_reg(struct mthca_dev *dev, + u64 offset, unsigned long size, + void __iomem **map, SIZE_T *map_size) +{ + u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR); + *map = ioremap(base + offset, size, map_size); + if (!*map) + return -ENOMEM; + return 0; +} + +static void mthca_unmap_reg(struct mthca_dev *dev, u64 offset, + unsigned long size, void __iomem *map, SIZE_T map_size) +{ + u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR); + iounmap(map, map_size); +} + +static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) +{ + u64 mthca_base; + + mthca_base = pci_resource_start(dev, HCA_BAR_TYPE_HCR); + + if (mthca_is_memfree(dev)) { + /* + * We assume that the EQ arm and EQ set CI registers + * fall within the first BAR. We can't trust the + * values firmware gives us, since those addresses are + * valid on the HCA's side of the PCI bus but not + * necessarily the host side. + */ + if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + &dev->clr_base, &dev->clr_base_size)) { + mthca_err(dev, "Couldn't map interrupt clear register, " + "aborting.\n"); + return -ENOMEM; + } + + /* + * Add 4 because we limit ourselves to EQs 0 ... 31, + * so we only need the low word of the register. 
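+ *
+ * (With at most 32 EQs only bits 0..31 of the 64-bit arm bitmask are
+ * ever used; in the register's big-endian layout those sit in the
+ * word at byte offset 4, which is why 4 is added here and why
+ * eqn_mask is built as swab32(1 << eqn) in mthca_create_eq().)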
+ */ + if (mthca_map_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + &dev->eq_regs.arbel.eq_arm, &dev->eq_regs.arbel.eq_arm_size)) { + mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + + if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_set_ci_base, + MTHCA_EQ_SET_CI_SIZE, + &dev->eq_regs.arbel.eq_set_ci_base, + &dev->eq_regs.arbel.eq_set_ci_base_size + )) { + mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); + mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + dev->eq_regs.arbel.eq_arm, dev->eq_regs.arbel.eq_arm_size); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + } else { + if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + &dev->clr_base, &dev->clr_base_size)) { + mthca_err(dev, "Couldn't map interrupt clear register, " + "aborting.\n"); + return -ENOMEM; + } + + if (mthca_map_reg(dev, MTHCA_ECR_BASE, + MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, + &dev->eq_regs.tavor.ecr_base, &dev->eq_regs.tavor.ecr_base_size)) { + mthca_err(dev, "Couldn't map ecr register, " + "aborting.\n"); + mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + } + + return 0; + +} + +static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) { + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_set_ci_base, + MTHCA_EQ_SET_CI_SIZE, + dev->eq_regs.arbel.eq_set_ci_base, + dev->eq_regs.arbel.eq_set_ci_base_size); + mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + dev->eq_regs.arbel.eq_arm, + dev->eq_regs.arbel.eq_arm_size); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + } else { + mthca_unmap_reg(dev, MTHCA_ECR_BASE, + MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, + dev->eq_regs.tavor.ecr_base, + dev->eq_regs.tavor.ecr_base_size); + mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + } +} + +int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) +{ + int ret; + u8 status; + + /* + * We assume that mapping one page is enough for the whole EQ + * context table. This is fine with all current HCAs, because + * we only use 32 EQs and each EQ uses 32 bytes of context + * memory, or 1 KB total. 
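+ * (Even counting MTHCA_EQ_CONTEXT_SIZE = 0x40, i.e. 64 bytes per
+ * EQ, 32 EQs need no more than 2 KB, still comfortably within one
+ * page.)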
+ */ + dev->eq_table.icm_virt = icm_virt; + dev->eq_table.icm_page = alloc_pages(dev,0,&dev->eq_table.icm_dma); + if (!dev->eq_table.icm_page) + return -ENOMEM; + + ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status); + if (!ret && status) + ret = -EINVAL; + if (ret) { + free_pages(dev, 0, dev->eq_table.icm_page, dev->eq_table.icm_dma ); + } + + return ret; +} + +void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev) +{ + u8 status; + + mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status); + free_pages(dev, 0, dev->eq_table.icm_page, dev->eq_table.icm_dma ); +} + +int __devinit mthca_init_eq_table(struct mthca_dev *dev) +{ + int err; + u8 status; + u8 intr; + int i; + + err = mthca_alloc_init(&dev->eq_table.alloc, + dev->limits.num_eqs, + dev->limits.num_eqs - 1, + dev->limits.reserved_eqs); + if (err) + return err; + + err = mthca_map_eq_regs(dev); + if (err) + goto err_out_free; + +#ifdef MSI_SUPPORT + if (dev->mthca_flags & MTHCA_FLAG_MSI || + dev->mthca_flags & MTHCA_FLAG_MSI_X) { + dev->eq_table.clr_mask = 0; + } else +#endif + { + dev->eq_table.clr_mask = + swab32(1 << (dev->eq_table.inta_pin & 31)); + dev->eq_table.clr_int = dev->clr_base + + (dev->eq_table.inta_pin < 32 ? 4 : 0); + } + + dev->eq_table.arm_mask = 0; + + intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ? + 128 : dev->eq_table.inta_pin; + + err = mthca_create_eq(dev, dev->limits.num_cqs, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, + &dev->eq_table.eq[MTHCA_EQ_COMP]); + if (err) + goto err_out_unmap; + + err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr, + &dev->eq_table.eq[MTHCA_EQ_ASYNC]); + if (err) + goto err_out_comp; + + err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr, + &dev->eq_table.eq[MTHCA_EQ_CMD]); + if (err) + goto err_out_async; + +#ifdef MSI_SUPPORT + if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { + static const char *eq_name[] = { + [MTHCA_EQ_COMP] = DRV_NAME " (comp)", + [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", + [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" + }; + + for (i = 0; i < MTHCA_NUM_EQ; ++i) { + err = request_irq(dev->eq_table.eq[i].msi_x_vector, + mthca_is_memfree(dev) ? + mthca_arbel_msi_x_interrupt : + mthca_tavor_msi_x_interrupt, + 0, eq_name[i], dev->eq_table.eq + i); + if (err) + goto err_out_cmd; + dev->eq_table.eq[i].have_irq = 1; + /* init DPC stuff something like that */ + spin_lock_init( &dev->eq_table.eq[i].lock ); + KeInitializeDpc( + &dev->eq_table.eq[i].dpc, + mthca_is_memfree(dev) ? + mthca_arbel_msi_x_dpc : + mthca_tavor_msi_x_dpc, + dev->eq_table.eq + i); + } + } else +#endif + { + spin_lock_init( &dev->ext->isr_lock ); + err = request_irq( + &dev->ext->interruptInfo, + &dev->ext->isr_lock.lock , + mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt, + dev, + &dev->ext->int_obj + ); + if (err) + goto err_out_cmd; + dev->eq_table.have_irq = 1; + + /* init DPC stuff */ + for (i = 0; i < MTHCA_NUM_EQ; ++i) { + spin_lock_init( &dev->eq_table.eq[i].lock ); + KeInitializeDpc( + &dev->eq_table.eq[i].dpc, + mthca_is_memfree(dev) ? 
+ mthca_arbel_dpc : + mthca_tavor_dpc, + dev->eq_table.eq + i); + } + } + + err = mthca_MAP_EQ(dev, async_mask(dev), + 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); + if (err) + mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", + dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err); + if (status) + mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n", + dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status); + + err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, + 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); + if (err) + mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n", + dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err); + if (status) + mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n", + dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status); + + for (i = 0; i < MTHCA_EQ_CMD; ++i) + if (mthca_is_memfree(dev)) + arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask); + else + tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); + + return 0; + +err_out_cmd: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]); + +err_out_async: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); + +err_out_comp: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]); + +err_out_unmap: + mthca_unmap_eq_regs(dev); + +err_out_free: + mthca_alloc_cleanup(&dev->eq_table.alloc); + return err; +} + +void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev) +{ + u8 status; + int i; + + mthca_free_irqs(dev); + + mthca_MAP_EQ(dev, async_mask(dev), + 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); + mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, + 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); + + for (i = 0; i < MTHCA_NUM_EQ; ++i) + mthca_free_eq(dev, &dev->eq_table.eq[i]); + + mthca_unmap_eq_regs(dev); + + mthca_alloc_cleanup(&dev->eq_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_mad.c b/branches/MTHCA/hw/mthca/kernel/mthca_mad.c new file mode 100644 index 00000000..0d94a92c --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_mad.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_mad.c 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#include +#include +#include + +#include "mthca_dev.h" +#include "mthca_cmd.h" + +enum { + MTHCA_VENDOR_CLASS1 = 0x9, + MTHCA_VENDOR_CLASS2 = 0xa +}; + +struct mthca_trap_mad { + struct ib_mad *mad; + dma_addr_t mapping; +}; + +static void update_sm_ah(struct mthca_dev *dev, + u8 port_num, u16 lid, u8 sl) +{ + struct ib_ah *new_ah; + struct ib_ah_attr ah_attr; + + if (!dev->send_agent[port_num - 1][0]) + return; + + RtlZeroMemory(&ah_attr, sizeof ah_attr); + ah_attr.dlid = lid; + ah_attr.sl = sl; + ah_attr.port_num = port_num; + +#ifdef LINUX_TO_BE_CHANGED + new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, + &ah_attr); + if (IS_ERR(new_ah)) + return; +#else + printk( KERN_ERROR " update_sm_ah: ib_create_ah not ported \n" ); + return; +#endif + + spin_lock_irqsave(&dev->sm_lock); + if (dev->sm_ah[port_num - 1]) { +#ifdef LINUX_TO_BE_CHANGED + ib_destroy_ah(dev->sm_ah[port_num - 1]); +#endif + } + dev->sm_ah[port_num - 1] = new_ah; + spin_unlock_irqrestore(&dev->sm_lock); +} + +/* + * Snoop SM MADs for port info and P_Key table sets, so we can + * synthesize LID change and P_Key change events. + */ +static void smp_snoop(struct ib_device *ibdev, + u8 port_num, + struct ib_mad *mad) +{ + struct ib_event event; + + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + mad->mad_hdr.method == IB_MGMT_METHOD_SET) { + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { + update_sm_ah(to_mdev(ibdev), port_num, + be16_to_cpup((__be16 *) (mad->data + 58)), + (*(u8 *) (mad->data + 76)) & 0xf); + + event.device = ibdev; + event.event = IB_EVENT_LID_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { + event.device = ibdev; + event.event = IB_EVENT_PKEY_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + } +} + +static void forward_trap(struct mthca_dev *dev, + u8 port_num, + struct ib_mad *mad) +{ + int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; + struct mthca_trap_mad *tmad; + struct ib_sge gather_list; + struct ib_send_wr *bad_wr, wr; + struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; + int ret; + + /* fill the template */ + wr.sg_list = &gather_list; + wr.num_sge = 1; + wr.opcode = IB_WR_SEND; + wr.send_flags = IB_SEND_SIGNALED; + wr.wr.ud.remote_qpn = qpn; + wr.wr.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0; + + if (agent) { + tmad = kmalloc(sizeof *tmad, GFP_KERNEL); + if (!tmad) + return; + + tmad->mad = alloc_dma_mem(dev, sizeof *tmad->mad, &tmad->mapping); + if (!tmad->mad) { + kfree(tmad); + return; + } + + memcpy(tmad->mad, mad, sizeof *mad); + + wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr; + wr.wr_id = (u64)(ULONG_PTR)tmad; + gather_list.addr = tmad->mapping; + gather_list.length = sizeof *tmad->mad; + gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; + + /* + * We rely here on the fact that MLX QPs don't use the + * address handle after the send is posted (this is + * wrong following the IB spec strictly, but we know + * it's OK for our devices). 
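+	 *
+	 * The spin lock below pins that window down: dev->sm_ah[] is read
+	 * and the MAD posted while dev->sm_lock is held, and update_sm_ah()
+	 * replaces (and destroys) the old AH under the same lock, so the
+	 * AH pointer is always valid at the moment the send is posted.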
+ */ + spin_lock_irqsave(&dev->sm_lock); + wr.wr.ud.ah = dev->sm_ah[port_num - 1]; + if (wr.wr.ud.ah) + #ifdef LINUX_TO_BE_CHANGED + ret = ib_post_send_mad(agent, &wr, &bad_wr); + #else + { + printk( KERN_ERROR " forward_trap: ib_post_send_mad not ported \n" ); + ret = -EINVAL; + } + #endif + else + ret = -EINVAL; + spin_unlock_irqrestore(&dev->sm_lock); + + if (ret) { + free_dma_mem(dev, sizeof *tmad->mad, + tmad->mad, tmad->mapping ); + kfree(tmad); + } + } +} + +int mthca_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + int err; + u8 status; + u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); + + /* Forward locally generated traps to the SM */ + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && + slid == 0) { + forward_trap(to_mdev(ibdev), port_num, in_mad); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + /* + * Only handle SM gets, sets and trap represses for SM class + * + * Only handle PMA and Mellanox vendor-specific class gets and + * sets for other classes. + */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) + return IB_MAD_RESULT_SUCCESS; + + /* + * Don't process SMInfo queries or vendor-specific + * MADs -- the SMA can't handle them. + */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || + ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == + IB_SMP_ATTR_VENDOR_MASK)) + return IB_MAD_RESULT_SUCCESS; + } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) + return IB_MAD_RESULT_SUCCESS; + } else + return IB_MAD_RESULT_SUCCESS; + + err = mthca_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad, + &status); + if (err) { + mthca_err(to_mdev(ibdev), "MAD_IFC failed\n"); + return IB_MAD_RESULT_FAILURE; + } + if (status == MTHCA_CMD_STAT_BAD_PKT) + return IB_MAD_RESULT_SUCCESS; + if (status) { + mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n", + status); + return IB_MAD_RESULT_FAILURE; + } + + if (!out_mad->mad_hdr.status) + smp_snoop(ibdev, port_num, in_mad); + + /* set return bit in status of directed route responses */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +static void send_handler(struct ib_mad_agent *agent, + struct ib_mad_send_wc *mad_send_wc) +{ + struct mthca_trap_mad *tmad = + (void *) (ULONG_PTR) mad_send_wc->wr_id; + + free_dma_mem(agent->device->mdev, sizeof *tmad->mad, + tmad->mad, tmad->mapping ); + kfree(tmad); +} + +int mthca_create_agents(struct mthca_dev *dev) +{ +#ifdef LINUX_TO_BE_CHANGED + struct ib_mad_agent *agent; + int p, q; + + spin_lock_init(&dev->sm_lock); + + for (p = 0; p < dev->limits.num_ports; ++p) + for (q = 0; q <= 1; ++q) { + 
agent = ib_register_mad_agent(&dev->ib_dev, p + 1, + q ? IB_QPT_QP1 : IB_QPT_QP0, + NULL, 0, send_handler, + NULL, NULL); + if (IS_ERR(agent)) + goto err; + dev->send_agent[p][q] = agent; + } + + return 0; + +err: + for (p = 0; p < dev->limits.num_ports; ++p) + for (q = 0; q <= 1; ++q) + if (dev->send_agent[p][q]) + ib_unregister_mad_agent(dev->send_agent[p][q]); + + return PTR_ERR(agent); +#else + printk( KERN_ERROR " mthca_create_agents: ib_register_mad_agent not ported \n" ); + return 0; +#endif +} + +void mthca_free_agents(struct mthca_dev *dev) +{ + struct ib_mad_agent *agent; + int p, q; + + for (p = 0; p < dev->limits.num_ports; ++p) { + for (q = 0; q <= 1; ++q) { + agent = dev->send_agent[p][q]; + dev->send_agent[p][q] = NULL; +#ifdef LINUX_TO_BE_CHANGED + ib_unregister_mad_agent(agent) +#endif + ; + } + +#ifdef LINUX_TO_BE_CHANGED + if (dev->sm_ah[p]) + ib_destroy_ah(dev->sm_ah[p]); +#endif + } +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_main.c b/branches/MTHCA/hw/mthca/kernel/mthca_main.c new file mode 100644 index 00000000..c857211e --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_main.c @@ -0,0 +1,1071 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_main.c 3056 2005-08-11 04:27:10Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_config_reg.h" +#include "mthca_cmd.h" +#include "mthca_profile.h" +#include "mthca_memfree.h" + +static const char mthca_version[] = + DRV_NAME ": Mellanox InfiniBand HCA driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static struct mthca_profile default_profile = { + 1 << 16, // num_qp + 4, // rdb_per_qp + 0, // num_srq + 1 << 16, // num_cq + 1 << 13, // num_mcg + 1 << 17, // num_mpt + 1 << 20, // num_mtt + 1 << 15, // num_udav (Tavor only) + 0, // num_uar + 1 << 18, // uarc_size (Arbel only) + 1 << 18, // fmr_reserved_mtts (Tavor only) +}; + +/* Types of supported HCA */ +enum __hca_type { + TAVOR, /* MT23108 */ + ARBEL_COMPAT, /* MT25208 in Tavor compat mode */ + ARBEL_NATIVE, /* MT25208 with extended features */ + SINAI /* MT25204 */ +}; + +#define MTHCA_FW_VER(major, minor, subminor) \ + (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor)) + +static struct { + u64 max_unsupported_fw; + u64 min_supported_fw; + int is_memfree; + int is_pcie; +} mthca_hca_table[] = { + { MTHCA_FW_VER(3, 2, 0), MTHCA_FW_VER(3, 3, 3), 0, 0 }, + { MTHCA_FW_VER(4, 6, 0), MTHCA_FW_VER(4, 7, 0), 0, 1 }, + { MTHCA_FW_VER(5, 0, 0), MTHCA_FW_VER(5, 1, 300), 1, 1 }, + { MTHCA_FW_VER(1, 0, 0), MTHCA_FW_VER(1, 0, 1), 1, 1 } +}; + + +#define HCA(v, d, t) \ + { PCI_VENDOR_ID_##v, PCI_DEVICE_ID_MELLANOX_##d, t } + +static struct pci_device_id { + unsigned vendor; + unsigned device; + enum __hca_type driver_data; +} mthca_pci_table[] = { + HCA(MELLANOX, TAVOR, TAVOR), + HCA(MELLANOX, ARBEL_COMPAT, ARBEL_COMPAT), + HCA(MELLANOX, ARBEL, ARBEL_NATIVE), + HCA(MELLANOX, SINAI_OLD, SINAI), + HCA(MELLANOX, SINAI, SINAI), + HCA(TOPSPIN, TAVOR, TAVOR), + HCA(TOPSPIN, ARBEL_COMPAT, TAVOR), + HCA(TOPSPIN, ARBEL, ARBEL_NATIVE), + HCA(TOPSPIN, SINAI_OLD, SINAI), + HCA(TOPSPIN, SINAI, SINAI), +}; +#define MTHCA_PCI_TABLE_SIZE (sizeof(mthca_pci_table)/sizeof(struct pci_device_id)) + + +// wrapper to driver's hca_reset +static NTSTATUS mthca_reset(struct mthca_dev *mdev) +{ + PDEVICE_OBJECT pdo = mdev->ext->cl_ext.p_self_do; + return hca_reset(pdo); +} + +// wrapper to driver's hca_tune_pci +static NTSTATUS mthca_tune_pci(struct mthca_dev *mdev) +{ + PDEVICE_OBJECT pdo = mdev->ext->cl_ext.p_self_do; + return hca_tune_pci(pdo); +} + +int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id) +{ + struct ib_device_attr props; + struct ib_device *ib_dev = &mdev->ib_dev; + int err = (ib_dev->query_device )(ib_dev, &props ); + + if (err) { + mthca_err( mdev, "can't get guid - mthca_query_port() failed (%08X)\n", err ); + return err; + } + + //TODO: do we need to convert GUID to LE by be64_to_cpu(x) ? 
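+	/* Illustrative sketch only (not enabled here): if a host-order GUID
+	 * ever turns out to be needed, the conversion would be a one-liner,
+	 * e.g.
+	 *
+	 *	*node_guid = be64_to_cpu(props.node_guid);
+	 *
+	 * IB GUIDs are conventionally carried big-endian end to end, which
+	 * is why the assignment below passes the value through unchanged.
+	 */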
+ *node_guid = props.node_guid; + *hw_id = props.hw_ver; + return 0; +} + +static struct pci_device_id * mthca_find_pci_dev(unsigned ven_id, unsigned dev_id) +{ + struct pci_device_id *p_id = mthca_pci_table; + int err; + int i; + + // find p_id (appropriate line in mthca_pci_table) + for (i = 0; i < MTHCA_PCI_TABLE_SIZE; ++i, ++p_id) { + if (p_id->device == dev_id && p_id->vendor == ven_id) + return p_id; + } + return NULL; +} + + +static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) +{ + int err; + u8 status; + + err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status); + if (err) { + mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); + return err; + } + if (status) { + mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, " + "aborting.\n", status); + return -EINVAL; + } + if (dev_lim->min_page_sz > PAGE_SIZE) { + mthca_err(mdev, "HCA minimum page size of %d bigger than " + "kernel PAGE_SIZE of %ld, aborting.\n", + dev_lim->min_page_sz, PAGE_SIZE); + return -ENODEV; + } + if (dev_lim->num_ports > MTHCA_MAX_PORTS) { + mthca_err(mdev, "HCA has %d ports, but we only support %d, " + "aborting.\n", + dev_lim->num_ports, MTHCA_MAX_PORTS); + return -ENODEV; + } + + mdev->limits.num_ports = dev_lim->num_ports; + mdev->limits.vl_cap = dev_lim->max_vl; + mdev->limits.mtu_cap = dev_lim->max_mtu; + mdev->limits.gid_table_len = dev_lim->max_gids; + mdev->limits.pkey_table_len = dev_lim->max_pkeys; + mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay; + mdev->limits.max_sg = dev_lim->max_sg; + mdev->limits.reserved_qps = dev_lim->reserved_qps; + mdev->limits.reserved_srqs = dev_lim->reserved_srqs; + mdev->limits.reserved_eecs = dev_lim->reserved_eecs; + mdev->limits.reserved_cqs = dev_lim->reserved_cqs; + mdev->limits.reserved_eqs = dev_lim->reserved_eqs; + mdev->limits.reserved_mtts = dev_lim->reserved_mtts; + mdev->limits.reserved_mrws = dev_lim->reserved_mrws; + mdev->limits.reserved_uars = dev_lim->reserved_uars; + mdev->limits.reserved_pds = dev_lim->reserved_pds; + + /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. + May be doable since hardware supports it for SRQ. + + IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver. + + IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not + supported by driver. 
*/ + mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + + if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR) + mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + + if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR) + mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + + if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI) + mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI; + + if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG) + mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + + if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE) + mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; + + if (dev_lim->flags & DEV_LIM_FLAG_SRQ) + mdev->mthca_flags |= MTHCA_FLAG_SRQ; + + return 0; +} + +static int mthca_init_tavor(struct mthca_dev *mdev) +{ + u8 status; + int err; + struct mthca_dev_lim dev_lim; + struct mthca_profile profile; + struct mthca_init_hca_param init_hca; + + err = mthca_SYS_EN(mdev, &status); + if (err) { + mthca_err(mdev, "SYS_EN command failed, aborting.\n"); + return err; + } + if (status) { + mthca_err(mdev, "SYS_EN returned status 0x%02x, " + "aborting.\n", status); + return -EINVAL; + } + + err = mthca_QUERY_FW(mdev, &status); + if (err) { + mthca_err(mdev, "QUERY_FW command failed, aborting.\n"); + goto err_disable; + } + if (status) { + mthca_err(mdev, "QUERY_FW returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_disable; + } + err = mthca_QUERY_DDR(mdev, &status); + if (err) { + mthca_err(mdev, "QUERY_DDR command failed, aborting.\n"); + goto err_disable; + } + if (status) { + mthca_err(mdev, "QUERY_DDR returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_disable; + } + + err = mthca_dev_lim(mdev, &dev_lim); + + profile = default_profile; + profile.num_uar = dev_lim.uar_size / PAGE_SIZE; + profile.uarc_size = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; + + err = (int)mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); + if (err < 0) + goto err_disable; + + err = (int)mthca_INIT_HCA(mdev, &init_hca, &status); + if (err) { + mthca_err(mdev, "INIT_HCA command failed, aborting.\n"); + goto err_disable; + } + if (status) { + mthca_err(mdev, "INIT_HCA returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_disable; + } + + return 0; + +err_disable: + mthca_SYS_DIS(mdev, &status); + + return err; +} + +static int mthca_load_fw(struct mthca_dev *mdev) +{ + u8 status; + int err; + + /* FIXME: use HCA-attached memory for FW if present */ + + mdev->fw.arbel.fw_icm = + mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!mdev->fw.arbel.fw_icm) { + mthca_err(mdev, "Couldn't allocate FW area, aborting.\n"); + return -ENOMEM; + } + + err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status); + if (err) { + mthca_err(mdev, "MAP_FA command failed, aborting.\n"); + goto err_free; + } + if (status) { + mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status); + err = -EINVAL; + goto err_free; + } + err = mthca_RUN_FW(mdev, &status); + if (err) { + mthca_err(mdev, "RUN_FW command failed, aborting.\n"); + goto err_unmap_fa; + } + if (status) { + mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status); + err = -EINVAL; + goto err_unmap_fa; + } + + return 0; + +err_unmap_fa: + mthca_UNMAP_FA(mdev, &status); + +err_free: + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + return err; +} + +static int mthca_init_icm(struct mthca_dev 
*mdev, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca, + u64 icm_size) +{ + u64 aux_pages; + u8 status; + int err; + + err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status); + if (err) { + mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n"); + return err; + } + if (status) { + mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, " + "aborting.\n", status); + return -EINVAL; + } + + mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n", + (unsigned long long) icm_size >> 10, + (unsigned long long) aux_pages << 2); + + mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int)aux_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!mdev->fw.arbel.aux_icm) { + mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n"); + return -ENOMEM; + } + + err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status); + if (err) { + mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n"); + goto err_free_aux; + } + if (status) { + mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status); + err = -EINVAL; + goto err_free_aux; + } + + err = mthca_map_eq_icm(mdev, init_hca->eqc_base); + if (err) { + mthca_err(mdev, "Failed to map EQ context memory, aborting.\n"); + goto err_unmap_aux; + } + + mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, + MTHCA_MTT_SEG_SIZE, + mdev->limits.num_mtt_segs, + mdev->limits.reserved_mtts, 1); + if (!mdev->mr_table.mtt_table) { + mthca_err(mdev, "Failed to map MTT context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_eq; + } + + mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base, + dev_lim->mpt_entry_sz, + mdev->limits.num_mpts, + mdev->limits.reserved_mrws, 1); + if (!mdev->mr_table.mpt_table) { + mthca_err(mdev, "Failed to map MPT context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_mtt; + } + + mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, + dev_lim->qpc_entry_sz, + mdev->limits.num_qps, + mdev->limits.reserved_qps, 0); + if (!mdev->qp_table.qp_table) { + mthca_err(mdev, "Failed to map QP context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_mpt; + } + + mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, + dev_lim->eqpc_entry_sz, + mdev->limits.num_qps, + mdev->limits.reserved_qps, 0); + if (!mdev->qp_table.eqp_table) { + mthca_err(mdev, "Failed to map EQP context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_qp; + } + + mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, + MTHCA_RDB_ENTRY_SIZE, + mdev->limits.num_qps << + mdev->qp_table.rdb_shift, + 0, 0); + if (!mdev->qp_table.rdb_table) { + mthca_err(mdev, "Failed to map RDB context memory, aborting\n"); + err = -ENOMEM; + goto err_unmap_eqp; + } + + mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, + dev_lim->cqc_entry_sz, + mdev->limits.num_cqs, + mdev->limits.reserved_cqs, 0); + if (!mdev->cq_table.table) { + mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_rdb; + } + + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { + mdev->srq_table.table = + mthca_alloc_icm_table(mdev, init_hca->srqc_base, + dev_lim->srq_entry_sz, + mdev->limits.num_srqs, + mdev->limits.reserved_srqs, 0); + if (!mdev->srq_table.table) { + mthca_err(mdev, "Failed to map SRQ context memory, " + "aborting.\n"); + err = -ENOMEM; + goto err_unmap_cq; + } + } + + /* + * It's not strictly required, but for simplicity just map the + * whole multicast 
group table now. The table isn't very big + * and it's a lot easier than trying to track ref counts. + */ + mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, + MTHCA_MGM_ENTRY_SIZE, + mdev->limits.num_mgms + + mdev->limits.num_amgms, + mdev->limits.num_mgms + + mdev->limits.num_amgms, + 0); + if (!mdev->mcg_table.table) { + mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); + err = -ENOMEM; + goto err_unmap_srq; + } + + return 0; + +err_unmap_srq: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + +err_unmap_cq: + mthca_free_icm_table(mdev, mdev->cq_table.table); + +err_unmap_rdb: + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + +err_unmap_eqp: + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + +err_unmap_qp: + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + +err_unmap_mpt: + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + +err_unmap_mtt: + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + +err_unmap_eq: + mthca_unmap_eq_icm(mdev); + +err_unmap_aux: + mthca_UNMAP_ICM_AUX(mdev, &status); + +err_free_aux: + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + return err; +} + +static int mthca_init_arbel(struct mthca_dev *mdev) +{ + struct mthca_dev_lim dev_lim; + struct mthca_profile profile; + struct mthca_init_hca_param init_hca; + u64 icm_size; + u8 status; + int err; + + err = mthca_QUERY_FW(mdev, &status); + if (err) { + mthca_err(mdev, "QUERY_FW command failed, aborting.\n"); + return err; + } + if (status) { + mthca_err(mdev, "QUERY_FW returned status 0x%02x, " + "aborting.\n", status); + return -EINVAL; + } + + err = mthca_ENABLE_LAM(mdev, &status); + if (err) { + mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n"); + return err; + } + if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) { + mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n"); + mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; + } else if (status) { + mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, " + "aborting.\n", status); + return -EINVAL; + } + + err = mthca_load_fw(mdev); + if (err) { + mthca_err(mdev, "Failed to start FW, aborting.\n"); + goto err_disable; + } + + err = mthca_dev_lim(mdev, &dev_lim); + if (err) { + mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); + goto err_stop_fw; + } + + profile = default_profile; + profile.num_uar = dev_lim.uar_size / PAGE_SIZE; + profile.num_udav = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; + + icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); + if ((int) icm_size < 0) { + err = (int)icm_size; + goto err_stop_fw; + } + + err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size); + if (err) + goto err_stop_fw; + + err = mthca_INIT_HCA(mdev, &init_hca, &status); + if (err) { + mthca_err(mdev, "INIT_HCA command failed, aborting.\n"); + goto err_free_icm; + } + if (status) { + mthca_err(mdev, "INIT_HCA returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_free_icm; + } + + return 0; + +err_free_icm: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_unmap_eq_icm(mdev); + + 
mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + +err_stop_fw: + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + +err_disable: + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + + return err; +} + +static void mthca_close_hca(struct mthca_dev *mdev) +{ + u8 status; + + mthca_CLOSE_HCA(mdev, 0, &status); + + if (mthca_is_memfree(mdev)) { + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_free_icm_table(mdev, mdev->mcg_table.table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + } else + mthca_SYS_DIS(mdev, &status); +} + +static int mthca_init_hca(struct mthca_dev *mdev) +{ + u8 status; + int err; + struct mthca_adapter adapter; + + if (mthca_is_memfree(mdev)) + err = mthca_init_arbel(mdev); + else + err = mthca_init_tavor(mdev); + + if (err) + return err; + + err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); + if (err) { + mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); + goto err_close; + } + if (status) { + mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " + "aborting.\n", status); + err = -EINVAL; + goto err_close; + } + + mdev->eq_table.inta_pin = adapter.inta_pin; + mdev->rev_id = adapter.revision_id; + memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); + + return 0; + +err_close: + mthca_close_hca(mdev); + return err; +} + +static int mthca_setup_hca(struct mthca_dev *dev) +{ + int err; + u8 status; + + MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock); + + err = mthca_init_uar_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "user access region table, aborting.\n"); + return err; + } + + err = mthca_uar_alloc(dev, &dev->driver_uar); + if (err) { + mthca_err(dev, "Failed to allocate driver access region, " + "aborting.\n"); + goto err_uar_table_free; + } + + dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&dev->kar_size); + if (!dev->kar) { + mthca_err(dev, "Couldn't map kernel access region, " + "aborting.\n"); + err = -ENOMEM; + goto err_uar_free; + } + + err = mthca_init_pd_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "protection domain table, aborting.\n"); + goto err_kar_unmap; + } + + err = mthca_init_mr_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "memory region table, aborting.\n"); + goto err_pd_table_free; + } + + err = mthca_pd_alloc(dev, 1, &dev->driver_pd); + if (err) { + mthca_err(dev, "Failed to create driver PD, " + "aborting.\n"); + goto err_mr_table_free; + } + + err = mthca_init_eq_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "event queue table, aborting.\n"); + goto err_pd_free; + } + + err = mthca_cmd_use_events(dev); + if (err) { + mthca_err(dev, "Failed to switch to event-driven " + "firmware commands, aborting.\n"); + goto err_eq_table_free; + } + + err = mthca_NOP(dev, &status); + if (err || status) { + 
mthca_err(dev, "NOP command failed to generate interrupt, aborting.\n"); + if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)) + mthca_err(dev, "Try again with MSI/MSI-X disabled.\n"); + else + mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + + goto err_cmd_poll; + } + + err = mthca_NOP(dev, &status); + if (err || status) { + mthca_err(dev, "NOP command failed to generate interrupt, aborting.\n"); + if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)) + mthca_err(dev, "Try again with MSI/MSI-X disabled.\n"); + else + mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + + goto err_cmd_poll; + } + + mthca_dbg(dev, "NOP command IRQ test passed\n"); + + err = mthca_init_cq_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "completion queue table, aborting.\n"); + goto err_cmd_poll; + } + + err = mthca_init_srq_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "shared receive queue table, aborting.\n"); + goto err_cq_table_free; + } + + err = mthca_init_qp_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "queue pair table, aborting.\n"); + goto err_srq_table_free; + } + + err = mthca_init_av_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "address vector table, aborting.\n"); + goto err_qp_table_free; + } + + err = mthca_init_mcg_table(dev); + if (err) { + mthca_err(dev, "Failed to initialize " + "multicast group table, aborting.\n"); + goto err_av_table_free; + } + + return 0; + +err_av_table_free: + mthca_cleanup_av_table(dev); + +err_qp_table_free: + mthca_cleanup_qp_table(dev); + +err_srq_table_free: + mthca_cleanup_srq_table(dev); + +err_cq_table_free: + mthca_cleanup_cq_table(dev); + +err_cmd_poll: + mthca_cmd_use_polling(dev); + +err_eq_table_free: + mthca_cleanup_eq_table(dev); + +err_pd_free: + mthca_pd_free(dev, &dev->driver_pd); + +err_mr_table_free: + mthca_cleanup_mr_table(dev); + +err_pd_table_free: + mthca_cleanup_pd_table(dev); + +err_kar_unmap: + iounmap(dev->kar, dev->kar_size); + +err_uar_free: + mthca_uar_free(dev, &dev->driver_uar); + +err_uar_table_free: + mthca_cleanup_uar_table(dev); + return err; +} + + +static int mthca_check_fw(struct mthca_dev *mdev, struct pci_device_id *p_id) +{ + int err = 0; + + if (mdev->fw_ver <= mthca_hca_table[p_id->driver_data].max_unsupported_fw) { + mthca_err(mdev, "HCA FW version %x.%x.%x is not supported. Use %x.%x.%x or higher.\n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff, + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff)); + err = -EINVAL; + } + else + if (mdev->fw_ver < mthca_hca_table[p_id->driver_data].min_supported_fw) { + mthca_warn(mdev, "HCA FW version %x.%x.%x is too old. Use %x.%x.%x or higher.\n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff, + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff)); + mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); + } + else { + mthca_warn(mdev, "Current HCA FW version is %x.%x.%x. 
\n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff)); + } + + return err; +} + +NTSTATUS mthca_init_one(hca_dev_ext_t *ext) +{ + static int mthca_version_printed = 0; + int err; + NTSTATUS status; + struct mthca_dev *mdev; + struct pci_device_id *p_id; + + /* print version */ + if (!mthca_version_printed) { + printk(KERN_INFO "%s", mthca_version); + ++mthca_version_printed; + } + + /* find the type of device */ + p_id = mthca_find_pci_dev( + (unsigned)ext->hcaConfig.VendorID, + (unsigned)ext->hcaConfig.DeviceID); + if (p_id == NULL) { + status = STATUS_NO_SUCH_DEVICE; + goto end; + } + + /* allocate mdev structure */ + mdev = kmalloc(sizeof *mdev, GFP_KERNEL); + if (!mdev) { + printk(KERN_ERROR PFX "Device struct alloc failed, " + "aborting.\n"); + status = STATUS_INSUFFICIENT_RESOURCES; + goto end; + } + + /* set some fields */ + RtlZeroMemory(mdev, sizeof *mdev); + mdev->ext = ext; /* pointer to DEVICE OBJECT extension */ + ext->hca.mdev = mdev; + if (ext->hca_hidden) + mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; + if (mthca_hca_table[p_id->driver_data].is_memfree) + mdev->mthca_flags |= MTHCA_FLAG_MEMFREE; + if (mthca_hca_table[p_id->driver_data].is_pcie) + mdev->mthca_flags |= MTHCA_FLAG_PCIE; + +//TODO: after we have a FW, capable of reset, +// write a routine, that only presses the button + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + status = mthca_reset(mdev); + if ( !NT_SUCCESS( status ) ) { + mthca_err(mdev, "Failed to reset HCA, aborting.\n"); + goto err_free_dev; + } + + if (ib_core_init()) { + mthca_err(mdev, "Failed to init core, aborting.\n"); + status = STATUS_UNSUCCESSFUL; + goto err_free_dev; + } + + if (mthca_cmd_init(mdev)) { + mthca_err(mdev, "Failed to init command interface, aborting.\n"); + status = STATUS_DEVICE_DATA_ERROR; + goto err_free_dev; + } + + status = mthca_tune_pci(mdev); + if ( !NT_SUCCESS( status ) ) { + goto err_cmd; + } + + err = mthca_init_hca(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_cmd; + } + + err = mthca_check_fw(mdev, p_id); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_close; + } + + err = mthca_setup_hca(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_close; + } + + err = mthca_register_device(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_cleanup; + } + + err = mthca_create_agents(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_unregister; + } + + mdev->state = MTHCA_DEV_INITIALIZED; + return 0; + +err_unregister: + mthca_unregister_device(mdev); + +err_cleanup: + mthca_cleanup_mcg_table(mdev); + mthca_cleanup_av_table(mdev); + mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); + mthca_cleanup_cq_table(mdev); + mthca_cmd_use_polling(mdev); + mthca_cleanup_eq_table(mdev); + + mthca_pd_free(mdev, &mdev->driver_pd); + + mthca_cleanup_mr_table(mdev); + mthca_cleanup_pd_table(mdev); + mthca_cleanup_uar_table(mdev); + +err_close: + mthca_close_hca(mdev); + +err_cmd: + mthca_cmd_cleanup(mdev); + +err_free_dev: + kfree(mdev); + +end: + mdev->state = MTHCA_DEV_FAILED; + return status; +} + +void __devexit mthca_remove_one(hca_dev_ext_t *ext) +{ + struct mthca_dev *mdev = ext->hca.mdev; + u8 status; + int p; + + if (mdev) { + if (mdev->state != MTHCA_DEV_FAILED) { + mthca_free_agents(mdev); + mthca_unregister_device(mdev); + + for (p = 1; p <= mdev->limits.num_ports; ++p) + 
mthca_CLOSE_IB(mdev, p, &status); + + mthca_cleanup_mcg_table(mdev); + mthca_cleanup_av_table(mdev); + mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); + mthca_cleanup_cq_table(mdev); + mthca_cmd_use_polling(mdev); + mthca_cleanup_eq_table(mdev); + + mthca_pd_free(mdev, &mdev->driver_pd); + + mthca_cleanup_mr_table(mdev); + mthca_cleanup_pd_table(mdev); + + iounmap(mdev->kar, mdev->kar_size); + mthca_uar_free(mdev, &mdev->driver_uar); + mthca_cleanup_uar_table(mdev); + mthca_close_hca(mdev); + mthca_cmd_cleanup(mdev); + ib_core_cleanup(); + } + + kfree(mdev); + } +} + + + diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_mcg.c b/branches/MTHCA/hw/mthca/kernel/mthca_mcg.c new file mode 100644 index 00000000..67429d18 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_mcg.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_mcg.c 2905 2005-07-25 18:26:52Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_cmd.h" + +enum { + MTHCA_QP_PER_MGM = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2) +}; + +struct mthca_mgm { + __be32 next_gid_index; + u32 reserved[3]; + u8 gid[16]; + __be32 qp[MTHCA_QP_PER_MGM]; +}; + +static const u8 zero_gid[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +/* + * Caller must hold MCG table semaphore. gid and mgm parameters must + * be properly aligned for command interface. + * + * Returns 0 unless a firmware command error occurs. + * + * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 + * and *mgm holds MGM entry. + * + * if GID is found in AMGM, *index = index in AMGM, *prev = index of + * previous entry in hash chain and *mgm holds AMGM entry. + * + * If no AMGM exists for given gid, *index = -1, *prev = index of last + * entry in hash chain and *mgm holds end of hash chain. 
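+ *
+ * Illustrative example: for a hash chain  *hash -> A -> B  where AMGM
+ * entry B holds the requested GID, the routine returns *index = B,
+ * *prev = A, and *mgm holding B's entry.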
+ */ +static int find_mgm(struct mthca_dev *dev, + u8 *gid, struct mthca_mailbox *mgm_mailbox, + u16 *hash, int *prev, int *index) +{ + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm = mgm_mailbox->buf; + u8 *mgid; + int err; + u8 status; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return -ENOMEM; + mgid = mailbox->buf; + + memcpy(mgid, gid, 16); + + err = mthca_MGID_HASH(dev, mailbox, hash, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "MGID_HASH returned status %02x\n", status); + err = -EINVAL; + goto out; + } + + if (0) + mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" + "%04x:%04x:%04x:%04x is %04x\n", + be16_to_cpu(((__be16 *) gid)[0]), + be16_to_cpu(((__be16 *) gid)[1]), + be16_to_cpu(((__be16 *) gid)[2]), + be16_to_cpu(((__be16 *) gid)[3]), + be16_to_cpu(((__be16 *) gid)[4]), + be16_to_cpu(((__be16 *) gid)[5]), + be16_to_cpu(((__be16 *) gid)[6]), + be16_to_cpu(((__be16 *) gid)[7]), + *hash); + + *index = *hash; + *prev = -1; + + do { + err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "READ_MGM returned status %02x\n", status); + return -EINVAL; + } + + if (!memcmp(mgm->gid, zero_gid, 16)) { + if (*index != *hash) { + mthca_err(dev, "Found zero MGID in AMGM.\n"); + err = -EINVAL; + } + goto out; + } + + if (!memcmp(mgm->gid, gid, 16)) + goto out; + + *prev = *index; + *index = be32_to_cpu(mgm->next_gid_index) >> 5; + } while (*index); + + *index = -1; + + out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm; + u16 hash; + int index, prev; + int link = 0; + int i; + int err; + u8 status; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + if (down_interruptible(&dev->mcg_table.mutex)) + return -EINTR; + + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); + if (err) + goto out; + + if (index != -1) { + if (!memcmp(mgm->gid, zero_gid, 16)) + memcpy(mgm->gid, gid->raw, 16); + } else { + link = 1; + + index = mthca_alloc(&dev->mcg_table.alloc); + if (index == -1) { + mthca_err(dev, "No AMGM entries left\n"); + err = -ENOMEM; + goto out; + } + + err = mthca_READ_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "READ_MGM returned status %02x\n", status); + err = -EINVAL; + goto out; + } + + memcpy(mgm->gid, gid->raw, 16); + mgm->next_gid_index = 0; + } + + for (i = 0; i < MTHCA_QP_PER_MGM; ++i) + if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { + mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); + break; + } + + if (i == MTHCA_QP_PER_MGM) { + mthca_err(dev, "MGM at index %x is full.\n", index); + err = -ENOMEM; + goto out; + } + + err = mthca_WRITE_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "WRITE_MGM returned status %02x\n", status); + err = -EINVAL; + } + + if (!link) + goto out; + + err = mthca_READ_MGM(dev, prev, mailbox, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "READ_MGM returned status %02x\n", status); + err = -EINVAL; + goto out; + } + + mgm->next_gid_index = cpu_to_be32(index << 5); + + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); + if (err) + goto out; + if (status) { + mthca_err(dev, "WRITE_MGM returned status %02x\n", status); + err = -EINVAL; + } + + out: + 
KeReleaseMutex(&dev->mcg_table.mutex,FALSE);
+	mthca_free_mailbox(dev, mailbox);
+	return err;
+}
+
+int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_mailbox *mailbox;
+	struct mthca_mgm *mgm;
+	u16 hash;
+	int prev, index;
+	int i, loc;
+	int err;
+	u8 status;
+
+	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	mgm = mailbox->buf;
+
+	if (down_interruptible(&dev->mcg_table.mutex)) {
+		mthca_free_mailbox(dev, mailbox);
+		return -EINTR;
+	}
+
+	err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
+	if (err)
+		goto out;
+
+	if (index == -1) {
+		mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
+			  "not found\n",
+			  be16_to_cpu(((__be16 *) gid->raw)[0]),
+			  be16_to_cpu(((__be16 *) gid->raw)[1]),
+			  be16_to_cpu(((__be16 *) gid->raw)[2]),
+			  be16_to_cpu(((__be16 *) gid->raw)[3]),
+			  be16_to_cpu(((__be16 *) gid->raw)[4]),
+			  be16_to_cpu(((__be16 *) gid->raw)[5]),
+			  be16_to_cpu(((__be16 *) gid->raw)[6]),
+			  be16_to_cpu(((__be16 *) gid->raw)[7]));
+		err = -EINVAL;
+		goto out;
+	}
+
+	for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) {
+		if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31)))
+			loc = i;
+		if (!(mgm->qp[i] & cpu_to_be32(1 << 31)))
+			break;
+	}
+
+	if (loc == -1) {
+		mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num);
+		err = -EINVAL;
+		goto out;
+	}
+
+	mgm->qp[loc] = mgm->qp[i - 1];
+	mgm->qp[i - 1] = 0;
+
+	err = mthca_WRITE_MGM(dev, index, mailbox, &status);
+	if (err)
+		goto out;
+	if (status) {
+		mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (i != 1)
+		goto out;
+
+	if (prev == -1) {
+		/* Remove entry from MGM */
+		if (be32_to_cpu(mgm->next_gid_index) >> 5) {
+			err = mthca_READ_MGM(dev,
+					     be32_to_cpu(mgm->next_gid_index) >> 5,
+					     mailbox, &status);
+			if (err)
+				goto out;
+			if (status) {
+				mthca_err(dev, "READ_MGM returned status %02x\n",
+					  status);
+				err = -EINVAL;
+				goto out;
+			}
+		} else
+			RtlZeroMemory(mgm->gid, 16);
+
+		err = mthca_WRITE_MGM(dev, index, mailbox, &status);
+		if (err)
+			goto out;
+		if (status) {
+			mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+			err = -EINVAL;
+			goto out;
+		}
+	} else {
+		/* Remove entry from AMGM */
+		index = be32_to_cpu(mgm->next_gid_index) >> 5;
+		err = mthca_READ_MGM(dev, prev, mailbox, &status);
+		if (err)
+			goto out;
+		if (status) {
+			mthca_err(dev, "READ_MGM returned status %02x\n", status);
+			err = -EINVAL;
+			goto out;
+		}
+
+		mgm->next_gid_index = cpu_to_be32(index << 5);
+
+		err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+		if (err)
+			goto out;
+		if (status) {
+			mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+ out:
+	KeReleaseMutex(&dev->mcg_table.mutex, FALSE);
+	mthca_free_mailbox(dev, mailbox);
+	return err;
+}
+
+int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
+{
+	int err;
+
+	err = mthca_alloc_init(&dev->mcg_table.alloc,
+			       dev->limits.num_amgms,
+			       dev->limits.num_amgms - 1,
+			       0);
+	if (err)
+		return err;
+
+	KeInitializeMutex(&dev->mcg_table.mutex,0);
+
+	return 0;
+}
+
+void __devexit mthca_cleanup_mcg_table(struct mthca_dev *dev)
+{
+	mthca_alloc_cleanup(&dev->mcg_table.alloc);
+}
diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_memfree.c b/branches/MTHCA/hw/mthca/kernel/mthca_memfree.c
new file mode 100644
index 00000000..af8675c4
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/kernel/mthca_memfree.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin 
Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_memfree.c 3042 2005-08-09 20:56:58Z roland $ + */ + +#include "hca_driver.h" +#include "mthca_memfree.h" +#include "mthca_dev.h" +#include "mthca_cmd.h" + +/* + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. + */ +enum { + MTHCA_ICM_ALLOC_SIZE = 1 << 18, + MTHCA_TABLE_CHUNK_SIZE = 1 << 18 +}; + +struct mthca_user_db_table { + KMUTEX mutex; + struct { + u64 uvirt; + struct scatterlist mem; + int refcount; + } page[0]; +}; + +void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm) +{ + struct mthca_icm_chunk *chunk, *tmp; + int i; + + if (!icm) + return; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list,struct mthca_icm_chunk,struct mthca_icm_chunk) { + if (chunk->nsg > 0) + pci_unmap_sg(dev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + free_pages(dev, get_order(chunk->mem[i].length), + chunk->mem[i].page, chunk->mem[i].dma_address); + + kfree(chunk); + } + + kfree(icm); +} + +/* allocate device memory of 'npages' pages as a list of chunks, each containing an array of + continuous buffers. 
Physical pages are allocated first and then mapped to bus space. */
+struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
+				  unsigned int gfp_mask)
+{
+	struct mthca_icm *icm;
+	struct mthca_icm_chunk *chunk = NULL;
+	int cur_order;
+
+	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+	if (!icm)
+		return icm;
+
+	icm->refcount = 0;
+	INIT_LIST_HEAD(&icm->chunk_list);
+
+	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);
+
+	while (npages > 0) {
+		/* allocate a new chunk */
+		if (!chunk) {
+			chunk = kmalloc(sizeof *chunk,
+					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+			if (!chunk)
+				goto fail;
+
+			RtlZeroMemory( chunk, sizeof *chunk );
+			list_add_tail(&chunk->list, &icm->chunk_list);
+		}
+
+		/* fill the chunk with physically contiguous areas of whole pages each */
+		while (1 << cur_order > npages)
+			/* take at most the number of pages still required */
+			--cur_order;
+
+		/* try to allocate a contiguous PHYSICAL buffer */
+		chunk->mem[chunk->npages].page = alloc_pages( dev,
+			cur_order,&chunk->mem[chunk->npages].dma_address);
+
+		/* if it succeeded, proceed */
+		if (chunk->mem[chunk->npages].page) {
+			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
+			chunk->mem[chunk->npages].offset = 0;
+
+			/* check whether the chunk is full */
+			if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
+				/* it's full --> map physical addresses to bus ones */
+				chunk->nsg = pci_map_sg(dev, chunk->mem,
+					chunk->npages,
+					PCI_DMA_BIDIRECTIONAL);
+
+				if (chunk->nsg <= 0)
+					goto fail;
+
+				chunk = NULL;
+			}
+
+			/* calculate the remaining memory to be allocated */
+			npages -= 1 << cur_order;
+		}
+		/* allocation failed, so reduce the buffer size and try again */
+		else {
+			--cur_order;
+			if (cur_order < 0)
+				goto fail;
+		}
+	}
+
+	/* last, not full chunk: map physical addresses to bus ones */
+	if (chunk) {
+		chunk->nsg = pci_map_sg(dev, chunk->mem,
+			chunk->npages,
+			PCI_DMA_BIDIRECTIONAL);
+
+		if (chunk->nsg <= 0)
+			goto fail;
+	}
+
+	return icm;
+
+fail:
+	mthca_free_icm(dev, icm);
+	return NULL;
+}
+
+int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
+{
+	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+	int ret = 0;
+	u8 status;
+
+	down(&table->mutex);
+
+	if (table->icm[i]) {
+		++table->icm[i]->refcount;
+		goto out;
+	}
+
+	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+					(table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) { + ret = -ENOMEM; + goto out; + } + + if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + &status) || status) { + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + ret = -ENOMEM; + goto out; + } + + ++table->icm[i]->refcount; + +out: + up(&table->mutex); + return ret; +} + +void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; + + down(&table->mutex); + + if (--table->icm[i]->refcount == 0) { + mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + } + + up(&table->mutex); +} + +void *mthca_table_find(struct mthca_icm_table *table, int obj) +{ + int idx, offset, i; + struct mthca_icm_chunk *chunk; + struct mthca_icm *icm; + struct page *page = NULL; + + if (!table->lowmem) + return NULL; + + down(&table->mutex); + + idx = (obj & (table->num_obj - 1)) * table->obj_size; + icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE]; + offset = idx % MTHCA_TABLE_CHUNK_SIZE; + + if (!icm) + goto out; + + list_for_each_entry(chunk, &icm->chunk_list, list,struct mthca_icm_chunk) { + for (i = 0; i < chunk->npages; ++i) { + if ((int)chunk->mem[i].length >= offset) { + page = chunk->mem[i].page; + break; + } + offset -= chunk->mem[i].length; + } + } + +out: + up(&table->mutex); + return page ? (char*)page + offset : NULL; +} + +int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end) +{ + int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size; + int i, err; + + for (i = start; i <= end; i += inc) { + err = mthca_table_get(dev, table, i); + if (err) + goto fail; + } + + return 0; + +fail: + while (i > start) { + i -= inc; + mthca_table_put(dev, table, i); + } + + return err; +} + +void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end) +{ + int i; + + if (!mthca_is_memfree(dev)) + return; + + for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) + mthca_table_put(dev, table, i); +} + +struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, + u64 virt, int obj_size, + int nobj, int reserved, + int use_lowmem) +{ + struct mthca_icm_table *table; + int num_icm; + unsigned chunk_size; + int i; + u8 status; + + num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE -1) / MTHCA_TABLE_CHUNK_SIZE; + + table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); + if (!table) + return NULL; + + table->virt = virt; + table->num_icm = num_icm; + table->num_obj = nobj; + table->obj_size = obj_size; + table->lowmem = use_lowmem; + KeInitializeMutex( &table->mutex, 0 ); + + for (i = 0; i < num_icm; ++i) + table->icm[i] = NULL; + + for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { + chunk_size = MTHCA_TABLE_CHUNK_SIZE; + if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) + chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; + + table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, + (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) + goto err; + if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE, + &status) || status) { + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + goto err; + } + + /* + * Add a reference to this ICM chunk so that it never + * gets freed (since it contains reserved firmware objects). + */ + ++table->icm[i]->refcount; + } + +#if 0 + mthca_dbg(dev, "Allocated/max chunks %d:%d, reserved/max objects %#x:%#x, one/total size %#x:%#x at %llx \n", + i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (unsigned long long) virt); +#endif + + return table; + +err: + for (i = 0; i < num_icm; ++i) + if (table->icm[i]) { + mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + } + + kfree(table); + + return NULL; +} + +void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) +{ + int i; + u8 status; + + for (i = 0; i < table->num_icm; ++i) + if (table->icm[i]) { + mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + } + +#if 0 + mthca_dbg(dev, "Released chunks %d, objects %#x, one/total size %#x:%#x at %llx \n", + table->num_icm, table->num_obj, table->obj_size, table->num_obj * table->obj_size, (unsigned long long) table->virt); +#endif + kfree(table); +} + +static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) +{ + return dev->uar_table.uarc_base + + uar->index * dev->uar_table.uarc_size + + page * 4096; +} + +int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index, u64 uaddr) +{ + int ret = 0; + u8 status; + int i; + + if (!mthca_is_memfree(dev)) + return 0; + + if (index < 0 || index > dev->uar_table.uarc_size / 8) + return -EINVAL; + + down(&db_tab->mutex); + + i = index / MTHCA_DB_REC_PER_PAGE; + + if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) || + (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) || + (uaddr & 4095)) { + ret = -EINVAL; + goto out; + } + + if (db_tab->page[i].refcount) { + ++db_tab->page[i].refcount; + goto out; + } + + ret = get_user_pages(dev, uaddr & PAGE_MASK, 1, 1, + &db_tab->page[i].mem.page, &db_tab->page[i].mem.p_mdl); + if (ret < 0) + goto out; + + db_tab->page[i].mem.length = 4096; + db_tab->page[i].mem.offset = (unsigned)(uaddr & ~PAGE_MASK); + + ret = pci_map_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + if (ret < 0) { + put_page(&db_tab->page[i].mem); + goto out; + } + + ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), + mthca_uarc_virt(dev, uar, i), &status); + if (!ret && status) + ret = -EINVAL; + if (ret) { + pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + put_page(&db_tab->page[i].mem); + goto out; + } + + db_tab->page[i].uvirt = uaddr; + db_tab->page[i].refcount = 1; + +out: + up(&db_tab->mutex); + return ret; +} + +void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index) +{ + if (!mthca_is_memfree(dev)) + return; + + /* + * To make our bookkeeping simpler, we don't unmap DB + * pages until we clean up the whole db table. 
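+	 * This function therefore only drops the page's refcount; the
+	 * actual UNMAP_ICM / pci_unmap_sg / put_page for any page whose
+	 * uvirt is still set happen later, in mthca_cleanup_user_db_tab().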
+ */ + + down(&db_tab->mutex); + + --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount; + + up(&db_tab->mutex); +} + +struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev) +{ + struct mthca_user_db_table *db_tab; + int npages; + int i; + + if (!mthca_is_memfree(dev)) + return NULL; + + npages = dev->uar_table.uarc_size / 4096; + db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); + if (!db_tab) + return ERR_PTR(-ENOMEM); + + KeInitializeMutex(&db_tab->mutex, 0); + for (i = 0; i < npages; ++i) { + db_tab->page[i].refcount = 0; + db_tab->page[i].uvirt = 0; + } + + return db_tab; +} + +void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) { + if (db_tab->page[i].uvirt) { + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); + pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + put_page(&db_tab->page[i].mem); + } + } +} + +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) +{ + int group; + int start, end, dir; + int i, j; + struct mthca_db_page *page; + int ret = 0; + u8 status; + + down(&dev->db_tab->mutex); + switch (type) { + case MTHCA_DB_TYPE_CQ_ARM: + case MTHCA_DB_TYPE_SQ: + group = 0; + start = 0; + end = dev->db_tab->max_group1; + dir = 1; + break; + + case MTHCA_DB_TYPE_CQ_SET_CI: + case MTHCA_DB_TYPE_RQ: + case MTHCA_DB_TYPE_SRQ: + group = 1; + start = dev->db_tab->npages - 1; + end = dev->db_tab->min_group2; + dir = -1; + break; + + default: + ret = -EINVAL; + goto out; + } + + /* try to find a free doorbell slot in an already-allocated page (via its bitmap) */ + for (i = start; i != end; i += dir) + if (dev->db_tab->page[i].db_rec && + !bitmap_full(dev->db_tab->page[i].used, + MTHCA_DB_REC_PER_PAGE)) { + page = dev->db_tab->page + i; + goto found; + } + + for (i = start; i != end; i += dir) { + if (!dev->db_tab->page[i].db_rec) { + page = dev->db_tab->page + i; + goto alloc; + } + } + + /* if there is no more room for DBs, give up */ + if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { + ret = -ENOMEM; + goto out; + } + + /* fix the group limit indices */ + if (group == 0) + ++dev->db_tab->max_group1; + else + --dev->db_tab->min_group2; + + /* allocate page */ + page = dev->db_tab->page + end; + +alloc: + page->db_rec = dma_alloc_coherent(dev, 4096, + &page->mapping, GFP_KERNEL); + if (!page->db_rec) { + ret = -ENOMEM; + goto out; + } + RtlZeroMemory(page->db_rec, 4096); + + ret = mthca_MAP_ICM_page(dev, page->mapping, + mthca_uarc_virt(dev, &dev->driver_uar, i), &status); + if (!ret && status) + ret = -EINVAL; + if (ret) { + dma_free_coherent(dev, 4096, + page->db_rec, page->mapping); + goto out; + } + + bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); + +found: + j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); + set_bit(j, page->used); + + if (group == 1) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + + ret = i * MTHCA_DB_REC_PER_PAGE + j; + + page->db_rec[j] = cpu_to_be64((((ULONGLONG)qn << 8) | (type << 5))); + + *db = (__be32 *) &page->db_rec[j]; +out: + up(&dev->db_tab->mutex); + + return ret; +} + +void mthca_free_db(struct mthca_dev *dev, int type, int db_index) +{ + int i, j; + struct mthca_db_page *page; + u8 status; + + i = db_index / MTHCA_DB_REC_PER_PAGE; + j = db_index % MTHCA_DB_REC_PER_PAGE; + + page = dev->db_tab->page + i; + + down(&dev->db_tab->mutex); + + page->db_rec[j] = 0; + if (i >= 
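/* group-2 records are handed out from the top of the page down (see mthca_alloc_db()), so mirror the slot index back */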
dev->db_tab->min_group2) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + clear_bit(j, page->used); + + if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && + i >= dev->db_tab->max_group1 - 1) { + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); + + dma_free_coherent(dev, 4096, + page->db_rec, page->mapping); + page->db_rec = NULL; + + if (i == dev->db_tab->max_group1) { + --dev->db_tab->max_group1; + /* XXX may be able to unmap more pages now */ + } + if (i == dev->db_tab->min_group2) + ++dev->db_tab->min_group2; + } + + up(&dev->db_tab->mutex); +} + +int mthca_init_db_tab(struct mthca_dev *dev) +{ + int i; + + if (!mthca_is_memfree(dev)) + return 0; + + dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); + if (!dev->db_tab) + return -ENOMEM; + + KeInitializeMutex(&dev->db_tab->mutex, 0); + /* number of pages needed for the UAR context table */ + dev->db_tab->npages = dev->uar_table.uarc_size / 4096; + dev->db_tab->max_group1 = 0; + dev->db_tab->min_group2 = dev->db_tab->npages - 1; + /* allocate the array of structures describing the UARC pages */ + dev->db_tab->page = kmalloc(dev->db_tab->npages * + sizeof *dev->db_tab->page, + GFP_KERNEL); + if (!dev->db_tab->page) { + kfree(dev->db_tab); + return -ENOMEM; + } + + for (i = 0; i < dev->db_tab->npages; ++i) + dev->db_tab->page[i].db_rec = NULL; + + return 0; +} + +void mthca_cleanup_db_tab(struct mthca_dev *dev) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + /* + * Because we don't always free our UARC pages when they + * become empty (to keep mthca_free_db() simple), we need to + * make a sweep through the doorbell pages and free any + * leftover pages now. + */ + for (i = 0; i < dev->db_tab->npages; ++i) { + if (!dev->db_tab->page[i].db_rec) + continue; + + if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) + mthca_warn(dev, "Kernel UARC page %d not empty\n", i); + + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); + + dma_free_coherent(dev, 4096, + dev->db_tab->page[i].db_rec, + dev->db_tab->page[i].mapping); + } + + kfree(dev->db_tab->page); + kfree(dev->db_tab); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_memfree.h b/branches/MTHCA/hw/mthca/kernel/mthca_memfree.h new file mode 100644 index 00000000..e167f210 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_memfree.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_memfree.h 2905 2005-07-25 18:26:52Z roland $ + */ + +#ifndef MTHCA_MEMFREE_H +#define MTHCA_MEMFREE_H + + +#define MTHCA_ICM_CHUNK_LEN \ + ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ + (sizeof (struct scatterlist))) + +struct mthca_icm_chunk { + struct list_head list; + int npages; + int nsg; + struct scatterlist mem[MTHCA_ICM_CHUNK_LEN]; +}; + +struct mthca_icm { + struct list_head chunk_list; + int refcount; +}; + +struct mthca_icm_table { + u64 virt; + int num_icm; + int num_obj; + int obj_size; + int lowmem; + KMUTEX mutex; + struct mthca_icm *icm[0]; +}; + +struct mthca_icm_iter { + struct mthca_icm *icm; + struct mthca_icm_chunk *chunk; + int page_idx; +}; + +struct mthca_dev; + +struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, + unsigned int gfp_mask); +void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm); + +struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, + u64 virt, int obj_size, + int nobj, int reserved, + int use_lowmem); +void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table); +int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); +void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); +void *mthca_table_find(struct mthca_icm_table *table, int obj); +int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end); +void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end); + +static inline void mthca_icm_first(struct mthca_icm *icm, + struct mthca_icm_iter *iter) +{ + iter->icm = icm; + iter->chunk = list_empty(&icm->chunk_list) ? 
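/* an empty ICM is flagged by a NULL chunk, which mthca_icm_last() reports as end-of-iteration */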
+ NULL : list_entry(icm->chunk_list.next, + struct mthca_icm_chunk, list); + iter->page_idx = 0; +} + +static inline int mthca_icm_last(struct mthca_icm_iter *iter) +{ + return !iter->chunk; +} + +static inline void mthca_icm_next(struct mthca_icm_iter *iter) +{ + if (++iter->page_idx >= iter->chunk->nsg) { + if (iter->chunk->list.next == &iter->icm->chunk_list) { + iter->chunk = NULL; + return; + } + + iter->chunk = list_entry(iter->chunk->list.next, + struct mthca_icm_chunk, list); + iter->page_idx = 0; + } +} + +static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter) +{ + return sg_dma_address(&iter->chunk->mem[iter->page_idx]); +} + +static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter) +{ + return sg_dma_len(&iter->chunk->mem[iter->page_idx]); +} + +enum { + MTHCA_DB_REC_PER_PAGE = 4096 / 8 +}; + +struct mthca_db_page { + DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); + __be64 *db_rec; + dma_addr_t mapping; +}; + +struct mthca_db_table { + int npages; + int max_group1; + int min_group2; + struct mthca_db_page *page; + KMUTEX mutex; +}; + +enum mthca_db_type { + MTHCA_DB_TYPE_INVALID = 0x0, + MTHCA_DB_TYPE_CQ_SET_CI = 0x1, + MTHCA_DB_TYPE_CQ_ARM = 0x2, + MTHCA_DB_TYPE_SQ = 0x3, + MTHCA_DB_TYPE_RQ = 0x4, + MTHCA_DB_TYPE_SRQ = 0x5, + MTHCA_DB_TYPE_GROUP_SEP = 0x7 +}; + +struct mthca_user_db_table; +struct mthca_uar; + +int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index, u64 uaddr); +void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index); +struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev); +void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab); + +int mthca_init_db_tab(struct mthca_dev *dev); +void mthca_cleanup_db_tab(struct mthca_dev *dev); +int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db); +void mthca_free_db(struct mthca_dev *dev, int type, int db_index); + +#endif /* MTHCA_MEMFREE_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_mr.c b/branches/MTHCA/hw/mthca/kernel/mthca_mr.c new file mode 100644 index 00000000..045284c9 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_mr.c @@ -0,0 +1,886 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_mr.c 2905 2005-07-25 18:26:52Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +struct mthca_mtt { + struct mthca_buddy *buddy; + int order; + u32 first_seg; +}; + +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +#pragma pack(push,1) +struct mthca_mpt_entry { + __be32 flags; + __be32 page_size; + __be32 key; + __be32 pd; + __be64 start; + __be64 length; + __be32 lkey; + __be32 window_count; + __be32 window_count_limit; + __be64 mtt_seg; + __be32 mtt_sz; /* Arbel only */ + u32 reserved[2]; +} __attribute__((packed)); +#pragma pack(pop) + +#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MTHCA_MPT_FLAG_MIO (1 << 17) +#define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MTHCA_MPT_FLAG_PHYSICAL (1 << 9) +#define MTHCA_MPT_FLAG_REGION (1 << 8) + +#define MTHCA_MTT_FLAG_PRESENT 1 + +#define MTHCA_MPT_STATUS_SW 0xF0 +#define MTHCA_MPT_STATUS_HW 0x00 + +/* + * Buddy allocator for MTT segments (currently not very efficient + * since it doesn't keep a free list and just searches linearly + * through the bitmaps) + */ + +static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) +{ + int o; + u32 m; + u32 seg; + + spin_lock(&buddy->lock); + + for (o = order; o <= buddy->max_order; ++o) { + m = 1 << (buddy->max_order - o); + seg = find_first_bit(buddy->bits[o], m); + if (seg < m) + goto found; + } + + spin_unlock(&buddy->lock); + return -1; + + found: + clear_bit(seg, buddy->bits[o]); + + while (o > order) { + --o; + seg <<= 1; + set_bit(seg ^ 1, buddy->bits[o]); + } + + spin_unlock(&buddy->lock); + + seg <<= order; + + return seg; +} + +static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) +{ + seg >>= order; + + spin_lock(&buddy->lock); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, buddy->bits[order]); + seg >>= 1; + ++order; + } + + set_bit(seg, buddy->bits[order]); + + spin_unlock(&buddy->lock); +} + +static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order) +{ + int i, s; + + buddy->max_order = max_order; + spin_lock_init(&buddy->lock); + + buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *), + GFP_KERNEL); + if (!buddy->bits) + goto err_out; + + RtlZeroMemory(buddy->bits, (buddy->max_order + 1) * sizeof (long *)); + + for (i = 0; i <= buddy->max_order; ++i) { + s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); + if (!buddy->bits[i]) + goto err_out_free; + bitmap_zero(buddy->bits[i], + 1 << (buddy->max_order - i)); + } + + set_bit(0, buddy->bits[buddy->max_order]); + + return 0; + +err_out_free: + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); + +err_out: + return -ENOMEM; +} + +static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy) +{ + int i; + + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); +} + +static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, + struct mthca_buddy *buddy) +{ + u32 seg = mthca_buddy_alloc(buddy, order); + + if (seg == -1) + return -1; + + if (mthca_is_memfree(dev)) + if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, + seg + (1 << 
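/* on memfree HCAs the MTT table itself lives in ICM, so pin the chunks backing segments seg through seg + 2^order - 1 before handing them out */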
order) - 1)) { + mthca_buddy_free(buddy, seg, order); + seg = -1; + } + + return seg; +} + +static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, + struct mthca_buddy *buddy) +{ + struct mthca_mtt *mtt; + int i; + + if (size <= 0) + return ERR_PTR(-EINVAL); + + mtt = kmalloc(sizeof *mtt, GFP_KERNEL); + if (!mtt) + return ERR_PTR(-ENOMEM); + + mtt->buddy = buddy; + mtt->order = 0; + for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1) + ++mtt->order; + + mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); + if (mtt->first_seg == -1) { + kfree(mtt); + return ERR_PTR(-ENOMEM); + } + + return mtt; +} + +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) +{ + return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); +} + +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) +{ + if (!mtt) + return; + + mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); + + mthca_table_put_range(dev, dev->mr_table.mtt_table, + mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); + + kfree(mtt); +} + +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len) +{ + struct mthca_mailbox *mailbox; + __be64 *mtt_entry; + int err = 0; + u8 status; + int i; + u64 val = 1; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mtt_entry = mailbox->buf; + + while (list_len > 0) { + val = dev->mr_table.mtt_base + + mtt->first_seg * MTHCA_MTT_SEG_SIZE + start_index * 8; + mtt_entry[0] = cpu_to_be64(val); + mtt_entry[1] = 0; + for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) { + val = buffer_list[i]; + // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + mtt_entry[i + 2] = cpu_to_be64(val); + } + + /* + * If we have an odd number of entries to write, add + * one more dummy entry for firmware efficiency. 
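+ * (Apparently the firmware consumes MTT entries in aligned pairs, + * which is why the count passed to WRITE_MTT below is rounded up + * to an even number with (i + 1) & ~1.)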
+ */ + if (i & 1) + mtt_entry[i + 2] = 0; + + err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); + if (err) { + mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); + goto out; + } + if (status) { + mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n", + status); + err = -EINVAL; + goto out; + } + + list_len -= i; + start_index += i; + buffer_list += i; + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +static inline u32 tavor_hw_index_to_key(u32 ind) +{ + return ind; +} + +static inline u32 tavor_key_to_hw_index(u32 key) +{ + return key; +} + +static inline u32 arbel_hw_index_to_key(u32 ind) +{ + return (ind >> 24) | (ind << 8); +} + +static inline u32 arbel_key_to_hw_index(u32 key) +{ + return (key << 24) | (key >> 8); +} + +static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind) +{ + if (mthca_is_memfree(dev)) + return arbel_hw_index_to_key(ind); + else + return tavor_hw_index_to_key(ind); +} + +static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) +{ + if (mthca_is_memfree(dev)) + return arbel_key_to_hw_index(key); + else + return tavor_key_to_hw_index(key); +} + +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) +{ + struct mthca_mailbox *mailbox; + struct mthca_mpt_entry *mpt_entry; + u32 key; + int i; + int err; + u8 status; + + might_sleep(); + + WARN_ON(buffer_size_shift >= 32); + + key = mthca_alloc(&dev->mr_table.mpt_alloc); + if (key == -1) + return -ENOMEM; + mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->mr_table.mpt_table, key); + if (err) + goto err_out_mpt_free; + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_out_table; + } + mpt_entry = mailbox->buf; + + mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | + MTHCA_MPT_FLAG_MIO | + MTHCA_MPT_FLAG_REGION | + access); + if (!mr->mtt) + mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL); + + mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12); + mpt_entry->key = cpu_to_be32(key); + mpt_entry->pd = cpu_to_be32(pd); + mpt_entry->start = cpu_to_be64(iova); + mpt_entry->length = cpu_to_be64(total_size); + + RtlZeroMemory(&mpt_entry->lkey, + sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); + + if (mr->mtt) + mpt_entry->mtt_seg = + cpu_to_be64(dev->mr_table.mtt_base + + mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE); + + if (0) { + mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); + for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { + if (i % 4 == 0) + printk("[%02x] ", i * 4); + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); + if ((i + 1) % 4 == 0) + printk("\n"); + } + } + + err = mthca_SW2HW_MPT(dev, mailbox, + key & (dev->limits.num_mpts - 1), + &status); + if (err) { + mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_out_mailbox; + } else if (status) { + mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", + status); + err = -EINVAL; + goto err_out_mailbox; + } + + mthca_free_mailbox(dev, mailbox); + return err; + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_table: + mthca_table_put(dev, dev->mr_table.mpt_table, key); + +err_out_mpt_free: + mthca_free(&dev->mr_table.mpt_alloc, key); + return err; +} + +int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, + u32 access, struct mthca_mr *mr) +{ + mr->mtt = NULL; + return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr); 
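+ /* a "no translation" region has no MTT: iova 0 and length ~0ULL cover + * the whole address space, and mthca_mr_alloc() sets + * MTHCA_MPT_FLAG_PHYSICAL because mr->mtt is NULL */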
+} + +int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + u32 access, struct mthca_mr *mr) +{ + int err; + + mr->mtt = mthca_alloc_mtt(dev, list_len); + if (IS_ERR(mr->mtt)) + return PTR_ERR(mr->mtt); + + err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); + if (err) { + mthca_free_mtt(dev, mr->mtt); + return err; + } + + err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, + total_size, access, mr); + if (err) + mthca_free_mtt(dev, mr->mtt); + + return err; +} + +/* Free mr or fmr */ +static void mthca_free_region(struct mthca_dev *dev, u32 lkey) +{ + mthca_table_put(dev, dev->mr_table.mpt_table, + arbel_key_to_hw_index(lkey)); + + mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); +} + +void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) +{ + int err; + u8 status; + + might_sleep(); + + err = mthca_HW2SW_MPT(dev, NULL, + key_to_hw_index(dev, mr->ibmr.lkey) & + (dev->limits.num_mpts - 1), + &status); + if (err) + mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); + else if (status) + mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n", + status); + + mthca_free_region(dev, mr->ibmr.lkey); + mthca_free_mtt(dev, mr->mtt); +} + +int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, + u32 access, struct mthca_fmr *mr) +{ + struct mthca_mpt_entry *mpt_entry; + struct mthca_mailbox *mailbox; + u64 mtt_seg; + u32 key, idx; + u8 status; + int list_len = mr->attr.max_pages; + int err = -ENOMEM; + int i; + + might_sleep(); + + if (mr->attr.page_size < 12 || mr->attr.page_size >= 32) + return -EINVAL; + + /* For Arbel, all MTTs must fit in the same page. */ + if (mthca_is_memfree(dev) && + mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE) + return -EINVAL; + + mr->maps = 0; + + key = mthca_alloc(&dev->mr_table.mpt_alloc); + if (key == -1) + return -ENOMEM; + + idx = key & (dev->limits.num_mpts - 1); + mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->mr_table.mpt_table, key); + if (err) + goto err_out_mpt_free; + + mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key); + BUG_ON(!mr->mem.arbel.mpt); + } else + mr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base + + sizeof *(mr->mem.tavor.mpt) * idx); + + mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); + if (IS_ERR(mr->mtt)) + goto err_out_table; + + mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; + + if (mthca_is_memfree(dev)) { + mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, + mr->mtt->first_seg); + BUG_ON(!mr->mem.arbel.mtts); + } else + mr->mem.tavor.mtts = (u64*)((u8*)dev->mr_table.tavor_fmr.mtt_base + mtt_seg); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_free_mtt; + + mpt_entry = mailbox->buf; + + mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | + MTHCA_MPT_FLAG_MIO | + MTHCA_MPT_FLAG_REGION | + access); + + mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12); + mpt_entry->key = cpu_to_be32(key); + mpt_entry->pd = cpu_to_be32(pd); + RtlZeroMemory(&mpt_entry->start, + sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start)); + mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg); + + if (0) { + mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); + for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { + if (i % 4 == 0) + printk("[%02x] ", i * 4); + printk(" 
%08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); + if ((i + 1) % 4 == 0) + printk("\n"); + } + } + + err = mthca_SW2HW_MPT(dev, mailbox, + key & (dev->limits.num_mpts - 1), + &status); + if (err) { + mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_out_mailbox_free; + } + if (status) { + mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", + status); + err = -EINVAL; + goto err_out_mailbox_free; + } + + mthca_free_mailbox(dev, mailbox); + return 0; + +err_out_mailbox_free: + mthca_free_mailbox(dev, mailbox); + +err_out_free_mtt: + mthca_free_mtt(dev, mr->mtt); + +err_out_table: + mthca_table_put(dev, dev->mr_table.mpt_table, key); + +err_out_mpt_free: + mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey); + return err; +} + +int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + if (fmr->maps) + return -EBUSY; + + mthca_free_region(dev, fmr->ibmr.lkey); + mthca_free_mtt(dev, fmr->mtt); + + return 0; +} + +static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, + int list_len, u64 iova) +{ + int i, page_mask; + + if (list_len > fmr->attr.max_pages) + return -EINVAL; + + page_mask = (1 << fmr->attr.page_size) - 1; + + /* We are getting page lists, so va must be page aligned. */ + if (iova & page_mask) + return -EINVAL; + + /* Trust the user not to pass misaligned data in page_list */ + if (0) + for (i = 0; i < list_len; ++i) { + if (page_list[i] & ~page_mask) + return -EINVAL; + } + + if (fmr->maps >= fmr->attr.max_maps) + return -EINVAL; + + return 0; +} + + +int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova) +{ + struct mthca_fmr *fmr = to_mfmr(ibfmr); + struct mthca_dev *dev = to_mdev(ibfmr->device); + struct mthca_mpt_entry mpt_entry; + u32 key; + int i, err; + + err = mthca_check_fmr(fmr, page_list, list_len, iova); + if (err) + return err; + + ++fmr->maps; + + key = tavor_key_to_hw_index(fmr->ibmr.lkey); + key += dev->limits.num_mpts; + fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key); + + writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); + + for (i = 0; i < list_len; ++i) { + // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword + u64 val = page_list[i]; + __be64 mtt_entry = cpu_to_be64(val); + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i); + } + + mpt_entry.lkey = cpu_to_be32(key); + mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); + mpt_entry.start = cpu_to_be64(iova); + + __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); + memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, + offsetof(struct mthca_mpt_entry, window_count) - + offsetof(struct mthca_mpt_entry, start)); + + writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt); + + return 0; +} + +int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova) +{ + struct mthca_fmr *fmr = to_mfmr(ibfmr); + struct mthca_dev *dev = to_mdev(ibfmr->device); + u32 key; + int i, err; + + err = mthca_check_fmr(fmr, page_list, list_len, iova); + if (err) + return err; + + ++fmr->maps; + + key = arbel_key_to_hw_index(fmr->ibmr.lkey); + key += dev->limits.num_mpts; + fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; + + wmb(); + + for (i = 0; i < list_len; ++i) { + // BUG in compiler: it can't perform OR on u64 !!! 
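(same workaround as in mthca_write_mtt(); presumably safe only on a little-endian target, where the PRESENT flag bit lands in the low dword.)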
We perform OR on the low dword + u64 val = page_list[i]; + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + fmr->mem.arbel.mtts[i] = cpu_to_be64(val); + } + + fmr->mem.arbel.mpt->key = cpu_to_be32(key); + fmr->mem.arbel.mpt->lkey = cpu_to_be32(key); + fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); + fmr->mem.arbel.mpt->start = cpu_to_be64(iova); + + wmb(); + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW; + + wmb(); + + return 0; +} + +void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + u32 key; + + if (!fmr->maps) + return; + + key = tavor_key_to_hw_index(fmr->ibmr.lkey); + key &= dev->limits.num_mpts - 1; + fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key); + + fmr->maps = 0; + + writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); +} + +void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + u32 key; + + if (!fmr->maps) + return; + + key = arbel_key_to_hw_index(fmr->ibmr.lkey); + key &= dev->limits.num_mpts - 1; + fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key); + + fmr->maps = 0; + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; +} + +int __devinit mthca_init_mr_table(struct mthca_dev *dev) +{ + int err, i; + + err = mthca_alloc_init(&dev->mr_table.mpt_alloc, + dev->limits.num_mpts, + ~0, dev->limits.reserved_mrws); + if (err) + return err; + + if (!mthca_is_memfree(dev) && + (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) + dev->limits.fmr_reserved_mtts = 0; + else + dev->mthca_flags |= MTHCA_FLAG_FMR; + + err = mthca_buddy_init(&dev->mr_table.mtt_buddy, + fls(dev->limits.num_mtt_segs - 1)); + + if (err) + goto err_mtt_buddy; + + dev->mr_table.tavor_fmr.mpt_base = NULL; + dev->mr_table.tavor_fmr.mtt_base = NULL; + + if (dev->limits.fmr_reserved_mtts) { + i = fls(dev->limits.fmr_reserved_mtts - 1); + + if (i >= 31) { + mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n"); + err = -EINVAL; + goto err_fmr_mpt; + } + + dev->mr_table.tavor_fmr.mpt_base = + ioremap(dev->mr_table.mpt_base, + (1 << i) * sizeof (struct mthca_mpt_entry), + &dev->mr_table.tavor_fmr.mpt_base_size); + + if (!dev->mr_table.tavor_fmr.mpt_base) { + mthca_warn(dev, "MPT ioremap for FMR failed.\n"); + err = -ENOMEM; + goto err_fmr_mpt; + } + + dev->mr_table.tavor_fmr.mtt_base = + ioremap(dev->mr_table.mtt_base, + (1 << i) * MTHCA_MTT_SEG_SIZE, + &dev->mr_table.tavor_fmr.mtt_base_size ); + if (!dev->mr_table.tavor_fmr.mtt_base) { + mthca_warn(dev, "MTT ioremap for FMR failed.\n"); + err = -ENOMEM; + goto err_fmr_mtt; + } + + err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i); + if (err) + goto err_fmr_mtt_buddy; + + /* Prevent regular MRs from using FMR keys */ + err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i); + if (err) + goto err_reserve_fmr; + + dev->mr_table.fmr_mtt_buddy = + &dev->mr_table.tavor_fmr.mtt_buddy; + } else + dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy; + + /* FMR table is always the first, take reserved MTTs out of there */ + if (dev->limits.reserved_mtts) { + i = fls(dev->limits.reserved_mtts - 1); + + if (mthca_alloc_mtt_range(dev, i, + dev->mr_table.fmr_mtt_buddy) == -1) { + mthca_warn(dev, "MTT table of order %d is too small.\n", + dev->mr_table.fmr_mtt_buddy->max_order); + err = -ENOMEM; + goto err_reserve_mtts; + } + } + + return 0; + +err_reserve_mtts: +err_reserve_fmr: + if (dev->limits.fmr_reserved_mtts) + mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); + +err_fmr_mtt_buddy: + if (dev->mr_table.tavor_fmr.mtt_base) + 
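/* the error labels below unwind the setup in reverse order */ + 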
iounmap(dev->mr_table.tavor_fmr.mtt_base, + dev->mr_table.tavor_fmr.mtt_base_size); + +err_fmr_mtt: + if (dev->mr_table.tavor_fmr.mpt_base) + iounmap(dev->mr_table.tavor_fmr.mpt_base, + dev->mr_table.tavor_fmr.mpt_base_size); + +err_fmr_mpt: + mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); + +err_mtt_buddy: + mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); + + return err; +} + +void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev) +{ + /* XXX check if any MRs are still allocated? */ + if (dev->limits.fmr_reserved_mtts) + mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); + + mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); + + if (dev->mr_table.tavor_fmr.mtt_base) + iounmap(dev->mr_table.tavor_fmr.mtt_base, + dev->mr_table.tavor_fmr.mtt_base_size); + if (dev->mr_table.tavor_fmr.mpt_base) + iounmap(dev->mr_table.tavor_fmr.mpt_base, + dev->mr_table.tavor_fmr.mpt_base_size); + + mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_pd.c b/branches/MTHCA/hw/mthca/kernel/mthca_pd.c new file mode 100644 index 00000000..9f207eed --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_pd.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_pd.c 2803 2005-07-05 15:58:55Z roland $ + */ + +#include "mthca_dev.h" + +int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd) +{ + int err = 0; + + might_sleep(); + + pd->privileged = privileged; + + atomic_set(&pd->sqp_count, 0); + pd->pd_num = mthca_alloc(&dev->pd_table.alloc); + if (pd->pd_num == -1) + return -ENOMEM; + + if (privileged) { + err = mthca_mr_alloc_notrans(dev, pd->pd_num, + MTHCA_MPT_FLAG_LOCAL_READ | + MTHCA_MPT_FLAG_LOCAL_WRITE, + &pd->ntmr); + if (err) + mthca_free(&dev->pd_table.alloc, pd->pd_num); + } + + return err; +} + +void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) +{ + might_sleep(); + if (pd->privileged) + mthca_free_mr(dev, &pd->ntmr); + mthca_free(&dev->pd_table.alloc, pd->pd_num); +} + +int __devinit mthca_init_pd_table(struct mthca_dev *dev) +{ + return mthca_alloc_init(&dev->pd_table.alloc, + dev->limits.num_pds, + (1 << 24) - 1, + dev->limits.reserved_pds); +} + +void __devexit mthca_cleanup_pd_table(struct mthca_dev *dev) +{ + /* XXX check if any PDs are still allocated? */ + mthca_alloc_cleanup(&dev->pd_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_profile.c b/branches/MTHCA/hw/mthca/kernel/mthca_profile.c new file mode 100644 index 00000000..548fdf80 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_profile.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_profile.c 3047 2005-08-10 03:59:35Z roland $ + */ + + +#include "mthca_profile.h" + +enum { + MTHCA_RES_QP, + MTHCA_RES_EEC, + MTHCA_RES_SRQ, + MTHCA_RES_CQ, + MTHCA_RES_EQP, + MTHCA_RES_EEEC, + MTHCA_RES_EQ, + MTHCA_RES_RDB, + MTHCA_RES_MCG, + MTHCA_RES_MPT, + MTHCA_RES_MTT, + MTHCA_RES_UAR, + MTHCA_RES_UDAV, + MTHCA_RES_UARC, + MTHCA_RES_NUM +}; + +enum { + MTHCA_NUM_EQS = 32, + MTHCA_NUM_PDS = 1 << 15 +}; + +u64 mthca_make_profile(struct mthca_dev *dev, + struct mthca_profile *request, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca) +{ + struct mthca_resource { + u64 size; + u64 start; + int type; + int num; + int log_num; + }; + + u64 mem_base, mem_avail; + u64 total_size = 0; + struct mthca_resource *profile; + struct mthca_resource tmp; + int i, j; + + profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL); + if (!profile) + return -ENOMEM; + + RtlZeroMemory(profile, MTHCA_RES_NUM * sizeof *profile); + + profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz; + profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz; + profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz; + profile[MTHCA_RES_CQ].size = dev_lim->cqc_entry_sz; + profile[MTHCA_RES_EQP].size = dev_lim->eqpc_entry_sz; + profile[MTHCA_RES_EEEC].size = dev_lim->eeec_entry_sz; + profile[MTHCA_RES_EQ].size = dev_lim->eqc_entry_sz; + profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE; + profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE; + profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz; + profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE; + profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz; + profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE; + profile[MTHCA_RES_UARC].size = request->uarc_size; + + profile[MTHCA_RES_QP].num = request->num_qp; + profile[MTHCA_RES_SRQ].num = request->num_srq; + profile[MTHCA_RES_EQP].num = request->num_qp; + profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; + profile[MTHCA_RES_CQ].num = request->num_cq; + profile[MTHCA_RES_EQ].num = MTHCA_NUM_EQS; + profile[MTHCA_RES_MCG].num = request->num_mcg; + profile[MTHCA_RES_MPT].num = request->num_mpt; + profile[MTHCA_RES_MTT].num = request->num_mtt; + profile[MTHCA_RES_UAR].num = request->num_uar; + profile[MTHCA_RES_UARC].num = request->num_uar; + profile[MTHCA_RES_UDAV].num = request->num_udav; + + for (i = 0; i < MTHCA_RES_NUM; ++i) { + profile[i].type = i; + profile[i].log_num = max(ffs(profile[i].num) - 1, 0); + profile[i].size *= profile[i].num; + if (mthca_is_memfree(dev)) + profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); + } + + if (mthca_is_memfree(dev)) { + mem_base = 0; + mem_avail = dev_lim->hca.arbel.max_icm_sz; + } else { + mem_base = dev->ddr_start; + mem_avail = dev->fw.tavor.fw_start - dev->ddr_start; + } + + /* + * Sort the resources in decreasing order of size. Since they + * all have sizes that are powers of 2, we'll be able to keep + * resources aligned to their size and pack them without gaps + * using the sorted order. 
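+ * (With power-of-two sizes, packing in decreasing order means each + * resource's start offset is a multiple of its own size, i.e. every + * resource stays naturally aligned.)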
+ */ + for (i = MTHCA_RES_NUM; i > 0; --i) + for (j = 1; j < i; ++j) { + if (profile[j].size > profile[j - 1].size) { + tmp = profile[j]; + profile[j] = profile[j - 1]; + profile[j - 1] = tmp; + } + } + + for (i = 0; i < MTHCA_RES_NUM; ++i) { + if (profile[i].size) { + profile[i].start = mem_base + total_size; + total_size += profile[i].size; + } + if (total_size > mem_avail) { + mthca_err(dev, "Profile requires 0x%llx bytes; " + "won't fit in 0x%llx bytes of context memory.\n", + (unsigned long long) total_size, + (unsigned long long) mem_avail); + kfree(profile); + return -ENOMEM; + } + + if (profile[i].size) + mthca_dbg(dev, "profile[%2d]--%2d/%2d @ 0x%16llx " + "(size 0x%8llx)\n", + i, profile[i].type, profile[i].log_num, + (unsigned long long) profile[i].start, + (unsigned long long) profile[i].size); + } + + if (mthca_is_memfree(dev)) + mthca_dbg(dev, "HCA context memory: reserving %d KB\n", + (int) (total_size >> 10)); + else + mthca_dbg(dev, "HCA memory: allocated %d KB/%d KB (%d KB free)\n", + (int) (total_size >> 10), (int) (mem_avail >> 10), + (int) ((mem_avail - total_size) >> 10)); + + for (i = 0; i < MTHCA_RES_NUM; ++i) { + int mc_entry_sz = MTHCA_MGM_ENTRY_SIZE; + int mtt_seg_sz = MTHCA_MTT_SEG_SIZE; + + switch (profile[i].type) { + case MTHCA_RES_QP: + dev->limits.num_qps = profile[i].num; + init_hca->qpc_base = profile[i].start; + init_hca->log_num_qps = (u8)profile[i].log_num; + break; + case MTHCA_RES_EEC: + dev->limits.num_eecs = profile[i].num; + init_hca->eec_base = profile[i].start; + init_hca->log_num_eecs = (u8)profile[i].log_num; + break; + case MTHCA_RES_SRQ: + dev->limits.num_srqs = profile[i].num; + init_hca->srqc_base = profile[i].start; + init_hca->log_num_srqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_CQ: + dev->limits.num_cqs = profile[i].num; + init_hca->cqc_base = profile[i].start; + init_hca->log_num_cqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_EQP: + init_hca->eqpc_base = profile[i].start; + break; + case MTHCA_RES_EEEC: + init_hca->eeec_base = profile[i].start; + break; + case MTHCA_RES_EQ: + dev->limits.num_eqs = profile[i].num; + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_RDB: + for (dev->qp_table.rdb_shift = 0; + request->num_qp << dev->qp_table.rdb_shift < profile[i].num; + ++dev->qp_table.rdb_shift) + ; /* nothing */ + dev->qp_table.rdb_base = (u32) profile[i].start; + init_hca->rdb_base = profile[i].start; + break; + case MTHCA_RES_MCG: + dev->limits.num_mgms = profile[i].num >> 1; + dev->limits.num_amgms = profile[i].num >> 1; + init_hca->mc_base = profile[i].start; + init_hca->log_mc_entry_sz = ffs(mc_entry_sz) - 1; + init_hca->log_mc_table_sz = (u8)profile[i].log_num; + init_hca->mc_hash_sz = 1 << (profile[i].log_num - 1); + break; + case MTHCA_RES_MPT: + dev->limits.num_mpts = profile[i].num; + dev->mr_table.mpt_base = profile[i].start; + init_hca->mpt_base = profile[i].start; + init_hca->log_mpt_sz = (u8)profile[i].log_num; + break; + case MTHCA_RES_MTT: + dev->limits.num_mtt_segs = profile[i].num; + dev->mr_table.mtt_base = profile[i].start; + init_hca->mtt_base = profile[i].start; + init_hca->mtt_seg_sz = ffs(mtt_seg_sz) - 7; + break; + case MTHCA_RES_UAR: + dev->limits.num_uars = profile[i].num; + init_hca->uar_scratch_base = profile[i].start; + break; + case MTHCA_RES_UDAV: + dev->av_table.ddr_av_base = profile[i].start; + dev->av_table.num_ddr_avs = profile[i].num; + break; + case MTHCA_RES_UARC: + dev->uar_table.uarc_size = request->uarc_size; + 
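/* log_uarc_sz is presumably log2 of the UARC size in 4 KB units: for a power-of-two x, ffs(x) - 13 == log2(x / 4096) */ + 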
dev->uar_table.uarc_base = profile[i].start; + init_hca->uarc_base = profile[i].start; + init_hca->log_uarc_sz = ffs(request->uarc_size) - 13; + init_hca->log_uar_sz = ffs(request->num_uar) - 1; + break; + default: + break; + } + } + + /* + * PDs don't take any HCA memory, but we assign them as part + * of the HCA profile anyway. + */ + dev->limits.num_pds = MTHCA_NUM_PDS; + + /* + * For Tavor, FMRs use ioremapped PCI memory. For 32 bit + * systems it may use too much vmalloc space to map all MTT + * memory, so we reserve some MTTs for FMR access, taking them + * out of the MR pool. They don't use additional memory, but + * we assign them as part of the HCA profile anyway. + */ + if (mthca_is_memfree(dev)) + dev->limits.fmr_reserved_mtts = 0; + else + dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts; + + kfree(profile); + return total_size; +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_profile.h b/branches/MTHCA/hw/mthca/kernel/mthca_profile.h new file mode 100644 index 00000000..940fd76d --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_profile.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_profile.h 3047 2005-08-10 03:59:35Z roland $ + */ + +#ifndef MTHCA_PROFILE_H +#define MTHCA_PROFILE_H + +#include "mthca_dev.h" +#include "mthca_cmd.h" + +struct mthca_profile { + int num_qp; + int rdb_per_qp; + int num_srq; + int num_cq; + int num_mcg; + int num_mpt; + int num_mtt; + int num_udav; + int num_uar; + int uarc_size; + int fmr_reserved_mtts; +}; + +u64 mthca_make_profile(struct mthca_dev *mdev, + struct mthca_profile *request, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca); + +#endif /* MTHCA_PROFILE_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_provider.c b/branches/MTHCA/hw/mthca/kernel/mthca_provider.c new file mode 100644 index 00000000..2bcaacde --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_provider.c @@ -0,0 +1,1157 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 
+ * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_provider.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_user.h" +#include "mthca_memfree.h" + + int mthca_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + struct mthca_dev* mdev = to_mdev(ibdev); + + u8 status; + + in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + RtlZeroMemory(props, sizeof *props); + + props->fw_ver = mdev->fw_ver; + + RtlZeroMemory(in_mad, sizeof *in_mad); + in_mad->base_version = 1; + in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + in_mad->class_version = 1; + in_mad->method = IB_MGMT_METHOD_GET; + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mthca_MAD_IFC(mdev, 1, 1, + 1, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + props->device_cap_flags = mdev->device_cap_flags; + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & + 0xffffff; + props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); + props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); + memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + memcpy(&props->node_guid, out_mad->data + 12, 8); + + props->max_mr_size = ~0ull; + props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; + props->max_qp_wr = 0xffff; + props->max_sge = mdev->limits.max_sg; + props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; + props->max_cqe = 0xffff; + props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; + props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; + props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; + props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift; + props->local_ca_ack_delay = (u8)mdev->limits.local_ca_ack_delay; + + err = 0; + out: + kfree(in_mad); + 
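/* the shared exit path relies on kfree() being a no-op for NULL, since either allocation above may have failed */ + 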
kfree(out_mad); + return err; +} + +int mthca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + RtlZeroMemory(in_mad, sizeof *in_mad); + in_mad->base_version = 1; + in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + in_mad->class_version = 1; + in_mad->method = IB_MGMT_METHOD_GET; + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); + props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; + props->pkey_tbl_len = (u16)to_mdev(ibdev)->limits.pkey_table_len; + props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mthca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props) +{ + struct mthca_set_ib_param set_ib; + struct ib_port_attr attr; + int err; + u8 status; + + if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) + return -EFAULT; + + err = mthca_query_port(ibdev, port, &attr); + if (err) + goto out; + + set_ib.set_si_guid = 0; + set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); + + set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + +out: + up(&to_mdev(ibdev)->cap_mask_mutex); + return err; +} + +int mthca_query_pkey(struct ib_device *ibdev, + u8 port, u16 index, u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + RtlZeroMemory(in_mad, sizeof *in_mad); + in_mad->base_version = 1; + in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + in_mad->class_version = 1; + in_mad->method = IB_MGMT_METHOD_GET; + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mthca_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + RtlZeroMemory(in_mad, 
sizeof *in_mad); + in_mad->base_version = 1; + in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + in_mad->class_version = 1; + in_mad->method = IB_MGMT_METHOD_GET; + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + memcpy(gid->raw, out_mad->data + 8, 8); + + RtlZeroMemory(in_mad, sizeof *in_mad); + in_mad->base_version = 1; + in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + in_mad->class_version = 1; + in_mad->method = IB_MGMT_METHOD_GET; + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct mthca_alloc_ucontext_resp uresp; + struct mthca_ucontext *context; + int err; + + RtlZeroMemory(&uresp, sizeof uresp); + + uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; + if (mthca_is_memfree(to_mdev(ibdev))) + uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; + else + uresp.uarc_size = 0; + + context = kmalloc(sizeof *context, GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); + if (err) { + kfree(context); + return ERR_PTR(err); + } + + context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); + if (IS_ERR(context->db_tab)) { + err = PTR_ERR(context->db_tab); + mthca_uar_free(to_mdev(ibdev), &context->uar); + kfree(context); + return ERR_PTR(err); + } + + if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { + mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); + mthca_uar_free(to_mdev(ibdev), &context->uar); + kfree(context); + return ERR_PTR(-EFAULT); + } + + return &context->ibucontext; +} + + int mthca_dealloc_ucontext(struct ib_ucontext *context) +{ + mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab); + mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); + kfree(to_mucontext(context)); + + return 0; +} + +#ifdef LINUX_TO_BE_CHANGED +static int mthca_mmap_uar(struct ib_ucontext *context, + struct vm_area_struct *vma) +{ + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, + to_mucontext(context)->uar.pfn, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} +#else +static int mthca_mmap_uar(struct ib_ucontext *context, + void*vma) +{ + return -EINVAL; +} +#endif +struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mthca_pd *pd; + int err; + + pd = kmalloc(sizeof *pd, GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); + if (err) { + kfree(pd); + return ERR_PTR(err); + } + + if (context) { + if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (u32))) { + mthca_pd_free(to_mdev(ibdev), pd); + kfree(pd); + return ERR_PTR(-EFAULT); + } + } + + return &pd->ibpd; +} + +int mthca_dealloc_pd(struct ib_pd *pd) +{ + 
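/* mthca_pd_free() releases the PD number and, for a privileged PD, its built-in no-translation MR */ + 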
mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); + kfree(pd); + + return 0; +} + +struct ib_ah *mthca_ah_create(struct ib_pd *pd, + struct ib_ah_attr *ah_attr) +{ + int err; + struct mthca_ah *ah; + + ah = kmalloc(sizeof *ah, GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); + if (err) { + kfree(ah); + return ERR_PTR(err); + } + + return &ah->ibah; +} + +int mthca_ah_destroy(struct ib_ah *ah) +{ + mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); + kfree(ah); + + return 0; +} + +struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mthca_create_srq ucmd; + struct mthca_ucontext *context = NULL; + struct mthca_srq *srq; + int err; + + srq = kmalloc(sizeof *srq, GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + if (pd->uobject) { + context = to_mucontext(pd->uobject->context); + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) + return ERR_PTR(-EFAULT); + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index, + ucmd.db_page); + + if (err) + goto err_free; + + srq->mr.ibmr.lkey = ucmd.lkey; + srq->db_index = ucmd.db_index; + } + + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), + &init_attr->attr, srq); + + if (err && pd->uobject) + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index); + + if (err) + goto err_free; + + if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (u32))) { + mthca_free_srq(to_mdev(pd->device), srq); + err = -EFAULT; + goto err_free; + } + + return &srq->ibsrq; + +err_free: + kfree(srq); + + return ERR_PTR(err); +} + +int mthca_destroy_srq(struct ib_srq *srq) +{ + struct mthca_ucontext *context; + + if (srq->uobject) { + context = to_mucontext(srq->uobject->context); + + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, + context->db_tab, to_msrq(srq)->db_index); + } + + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); + kfree(srq); + + return 0; +} + +struct ib_qp *mthca_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mthca_create_qp ucmd; + struct mthca_qp *qp; + int err; + + switch (init_attr->qp_type) { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + case IB_QPT_UNRELIABLE_DGRM: + { + struct mthca_ucontext *context; + + qp = kmalloc(sizeof *qp, GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + if (pd->uobject) { + context = to_mucontext(pd->uobject->context); + + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) + return ERR_PTR(-EFAULT); + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, + ucmd.sq_db_index, ucmd.sq_db_page); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, + ucmd.rq_db_index, ucmd.rq_db_page); + if (err) { + mthca_unmap_user_db(to_mdev(pd->device), + &context->uar, + context->db_tab, + ucmd.sq_db_index); + kfree(qp); + return ERR_PTR(err); + } + + qp->mr.ibmr.lkey = ucmd.lkey; + qp->sq.db_index = ucmd.sq_db_index; + qp->rq.db_index = ucmd.rq_db_index; + } + + err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), + to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq), + init_attr->qp_type, init_attr->sq_sig_type, + &init_attr->cap, qp); + + if (err && pd->uobject) { + context = to_mucontext(pd->uobject->context); + + mthca_unmap_user_db(to_mdev(pd->device), + &context->uar, + context->db_tab, + 
ucmd.sq_db_index); + mthca_unmap_user_db(to_mdev(pd->device), + &context->uar, + context->db_tab, + ucmd.rq_db_index); + } + + qp->ibqp.qp_num = qp->qpn; + break; + } + case IB_QPT_QP0: + case IB_QPT_QP1: + { + /* Don't allow userspace to create special QPs */ + if (pd->uobject) + return ERR_PTR(-EINVAL); + + qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_QP0 ? 0 : 1; + + err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), + to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq), + init_attr->sq_sig_type, &init_attr->cap, + qp->ibqp.qp_num, init_attr->port_num, + to_msqp(qp)); + break; + } + default: + /* Don't support raw QPs */ + return ERR_PTR(-ENOSYS); + } + + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + init_attr->cap.max_inline_data = 0; + init_attr->cap.max_send_wr = qp->sq.max; + init_attr->cap.max_recv_wr = qp->rq.max; + init_attr->cap.max_send_sge = qp->sq.max_gs; + init_attr->cap.max_recv_sge = qp->rq.max_gs; + + return &qp->ibqp; +} + +int mthca_destroy_qp(struct ib_qp *qp) +{ + if (qp->uobject) { + mthca_unmap_user_db(to_mdev(qp->device), + &to_mucontext(qp->uobject->context)->uar, + to_mucontext(qp->uobject->context)->db_tab, + to_mqp(qp)->sq.db_index); + mthca_unmap_user_db(to_mdev(qp->device), + &to_mucontext(qp->uobject->context)->uar, + to_mucontext(qp->uobject->context)->db_tab, + to_mqp(qp)->rq.db_index); + } + mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); + kfree(qp); + return 0; +} + +struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mthca_create_cq ucmd; + struct mthca_cq *cq; + int nent; + int err; + + if (context) { + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) + return ERR_PTR(-EFAULT); + + err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, + ucmd.set_db_index, ucmd.set_db_page); + if (err) + return ERR_PTR(err); + + err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, + ucmd.arm_db_index, ucmd.arm_db_page); + if (err) + goto err_unmap_set; + } + + cq = kmalloc(sizeof *cq, GFP_KERNEL); + if (!cq) { + err = -ENOMEM; + goto err_unmap_arm; + } + + if (context) { + cq->mr.ibmr.lkey = ucmd.lkey; + cq->set_ci_db_index = ucmd.set_db_index; + cq->arm_db_index = ucmd.arm_db_index; + } + + for (nent = 1; nent <= entries; nent <<= 1) + ; /* nothing */ + + err = mthca_init_cq(to_mdev(ibdev), nent, + context ? to_mucontext(context) : NULL, + context ? 
ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, + cq); + if (err) + goto err_free; + + if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (u32))) { + mthca_free_cq(to_mdev(ibdev), cq); + goto err_free; + } + + return &cq->ibcq; + +err_free: + kfree(cq); + +err_unmap_arm: + if (context) + mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, ucmd.arm_db_index); + +err_unmap_set: + if (context) + mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, ucmd.set_db_index); + + return ERR_PTR(err); +} + +int mthca_destroy_cq(struct ib_cq *cq) +{ + if (cq->uobject) { + mthca_unmap_user_db(to_mdev(cq->device), + &to_mucontext(cq->uobject->context)->uar, + to_mucontext(cq->uobject->context)->db_tab, + to_mcq(cq)->arm_db_index); + mthca_unmap_user_db(to_mdev(cq->device), + &to_mucontext(cq->uobject->context)->uar, + to_mucontext(cq->uobject->context)->db_tab, + to_mcq(cq)->set_ci_db_index); + } + mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); + kfree(cq); + + return 0; +} + +static inline u32 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) | + MTHCA_MPT_FLAG_LOCAL_READ; +} + +struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct mthca_mr *mr; + int err; + + mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + err = mthca_mr_alloc_notrans(to_mdev(pd->device), + to_mpd(pd)->pd_num, + convert_access(acc), mr); + + if (err) { + kfree(mr); + return ERR_PTR(err); + } + + return &mr->ibmr; +} + +struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, + int acc, + u64 *iova_start) +{ + struct mthca_mr *mr; + u64 *page_list; + u64 total_size; + u64 mask; + int shift; + int npages; + int err; + int i, j, n; + + /* First check that we have enough alignment */ + if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) + return ERR_PTR(-EINVAL); + + if (num_phys_buf > 1 && + ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) + return ERR_PTR(-EINVAL); + + mask = 0; + total_size = 0; + for (i = 0; i < num_phys_buf; ++i) { + if (i != 0 && buffer_list[i].addr & ~PAGE_MASK) + return ERR_PTR(-EINVAL); + if (i != 0 && i != num_phys_buf - 1 && + (buffer_list[i].size & ~PAGE_MASK)) + return ERR_PTR(-EINVAL); + + total_size += buffer_list[i].size; + if (i > 0) + mask |= buffer_list[i].addr; + } + + /* Find largest page shift we can use to cover buffers */ + for (shift = PAGE_SHIFT; shift < 31; ++shift) + if (num_phys_buf > 1) { + if ((1ULL << shift) & mask) + break; + } else { + if (1ULL << shift >= + buffer_list[0].size + + (buffer_list[0].addr & ((1ULL << shift) - 1))) + break; + } + + buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); + buffer_list[0].addr &= ~0ull << shift; + + mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + RtlZeroMemory(mr, sizeof *mr); + + npages = 0; + for (i = 0; i < num_phys_buf; ++i) + npages += (int)((buffer_list[i].size + (1ULL << shift) - 1) >> shift); + + if (!npages) + return &mr->ibmr; + + page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); + if (!page_list) { + kfree(mr); + return ERR_PTR(-ENOMEM); + } + + n = 0; + for (i = 0; i < num_phys_buf; ++i) + for (j = 0; + j < 
(buffer_list[i].size + (1ULL << shift) - 1) >> shift; + ++j) + page_list[n++] = buffer_list[i].addr + ((u64) j << shift); + + mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) " + "in PD %x; shift %d, npages %d.\n", + (unsigned long long) buffer_list[0].addr, + (unsigned long long) *iova_start, + to_mpd(pd)->pd_num, + shift, npages); + + err = mthca_mr_alloc_phys(to_mdev(pd->device), + to_mpd(pd)->pd_num, + page_list, shift, npages, + *iova_start, total_size, + convert_access(acc), mr); + + if (err) { + kfree(page_list); + kfree(mr); + return ERR_PTR(err); + } + + kfree(page_list); + return &mr->ibmr; +} + +struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, + int acc, struct ib_udata *udata) +{ + struct mthca_dev *dev = to_mdev(pd->device); + struct ib_umem_chunk *chunk; + struct mthca_mr *mr; + u64 *pages; + int shift, n, len; + int i, j, k; + int err = 0; + + shift = ffs(region->page_size) - 1; + + mr = kmalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + RtlZeroMemory(mr, sizeof *mr); + + n = 0; + list_for_each_entry(chunk, ®ion->chunk_list, list,struct ib_umem_chunk) + n += chunk->nents; + + mr->mtt = mthca_alloc_mtt(dev, n); + if (IS_ERR(mr->mtt)) { + err = PTR_ERR(mr->mtt); + goto err; + } + + pages = (u64 *) kmalloc(PAGE_SIZE,GFP_KERNEL); + if (!pages) { + err = -ENOMEM; + goto err_mtt; + } + + i = n = 0; + + list_for_each_entry(chunk, ®ion->chunk_list, list,struct ib_umem_chunk) + for (j = 0; j < chunk->nmap; ++j) { + len = sg_dma_len(&chunk->page_list[j]) >> shift; + for (k = 0; k < len; ++k) { + pages[i++] = sg_dma_address(&chunk->page_list[j]) + + region->page_size * k; + /* + * Be friendly to WRITE_MTT command + * and leave two empty slots for the + * index and reserved fields of the + * mailbox. 
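+				 * For example, with 4 KB pages this flushes
+				 * in batches of PAGE_SIZE / sizeof (u64) - 2
+				 * = 510 MTT entries per WRITE_MTT command.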
+ */ + if (i == PAGE_SIZE / sizeof (u64) - 2) { + err = mthca_write_mtt(dev, mr->mtt, + n, pages, i); + if (err) + goto mtt_done; + n += i; + i = 0; + } + } + } + + if (i) + err = mthca_write_mtt(dev, mr->mtt, n, pages, i); +mtt_done: + free_page((void*) pages); + if (err) + goto err_mtt; + + err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base, + region->length, convert_access(acc), mr); + + if (err) + goto err_mtt; + + return &mr->ibmr; + +err_mtt: + mthca_free_mtt(dev, mr->mtt); + +err: + kfree(mr); + return ERR_PTR(err); +} + +int mthca_dereg_mr(struct ib_mr *mr) +{ + struct mthca_mr *mmr = to_mmr(mr); + mthca_free_mr(to_mdev(mr->device), mmr); + kfree(mmr); + return 0; +} + +struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct mthca_fmr *fmr; + int err; + + fmr = kmalloc(sizeof *fmr, GFP_KERNEL); + if (!fmr) + return ERR_PTR(-ENOMEM); + + memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); + err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, + convert_access(mr_access_flags), fmr); + + if (err) { + kfree(fmr); + return ERR_PTR(err); + } + + return &fmr->ibmr; +} + +int mthca_dealloc_fmr(struct ib_fmr *fmr) +{ + struct mthca_fmr *mfmr = to_mfmr(fmr); + int err; + + err = mthca_free_fmr(to_mdev(fmr->device), mfmr); + if (err) + return err; + + kfree(mfmr); + return 0; +} + +int mthca_unmap_fmr(struct list_head *fmr_list) +{ + struct ib_fmr *fmr; + int err; + u8 status; + struct mthca_dev *mdev = NULL; + + list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) { + if (mdev && to_mdev(fmr->device) != mdev) + return -EINVAL; + mdev = to_mdev(fmr->device); + } + + if (!mdev) + return 0; + + if (mthca_is_memfree(mdev)) { + list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) + mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr)); + + wmb(); + } else + list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) + mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); + + err = mthca_SYNC_TPT(mdev, &status); + if (err) + return err; + if (status) + return -EINVAL; + return 0; +} + +#ifdef LINUX_TO_BE_REMOVED +static ssize_t show_rev(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + return sprintf(buf, "%x\n", dev->rev_id); +} + +static ssize_t show_fw_ver(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32), + (int) (dev->fw_ver >> 16) & 0xffff, + (int) dev->fw_ver & 0xffff); +} + +static ssize_t show_hca(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + switch (dev->device) { + case PCI_DEVICE_ID_MELLANOX_TAVOR: + return sprintf(buf, "MT23108\n"); + case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT: + return sprintf(buf, "MT25208 (MT23108 compat mode)\n"); + case PCI_DEVICE_ID_MELLANOX_ARBEL: + return sprintf(buf, "MT25208\n"); + case PCI_DEVICE_ID_MELLANOX_SINAI: + case PCI_DEVICE_ID_MELLANOX_SINAI_OLD: + return sprintf(buf, "MT25204\n"); + default: + return sprintf(buf, "unknown\n"); + } +} + +static ssize_t show_board(struct class_device *cdev, char *buf) +{ + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); + return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); +} + +static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static 
CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); + +static struct class_device_attribute *mthca_class_attributes[] = { + &class_device_attr_hw_rev, + &class_device_attr_fw_ver, + &class_device_attr_hca_type, + &class_device_attr_board_id +}; +#endif + +int mthca_register_device(struct mthca_dev *dev) +{ + int ret; + int i; + + strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); +#ifdef LINUX_TO_BE_REMOVED + dev->ib_dev.owner = THIS_MODULE; +#endif + + dev->ib_dev.node_type = IB_NODE_CA; + dev->ib_dev.phys_port_cnt = (u8)dev->limits.num_ports; + dev->ib_dev.mdev = dev; +#ifdef LINUX_TO_BE_REMOVED + dev->ib_dev.class_dev.dev = &dev->pdev->dev; +#endif + dev->ib_dev.query_device = mthca_query_device; + dev->ib_dev.query_port = mthca_query_port; + dev->ib_dev.modify_port = mthca_modify_port; + dev->ib_dev.query_pkey = mthca_query_pkey; + dev->ib_dev.query_gid = mthca_query_gid; + dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext; + dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext; + dev->ib_dev.mmap = mthca_mmap_uar; + dev->ib_dev.alloc_pd = mthca_alloc_pd; + dev->ib_dev.dealloc_pd = mthca_dealloc_pd; + dev->ib_dev.create_ah = mthca_ah_create; + dev->ib_dev.destroy_ah = mthca_ah_destroy; + + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { + dev->ib_dev.create_srq = mthca_create_srq; + dev->ib_dev.destroy_srq = mthca_destroy_srq; + + if (mthca_is_memfree(dev)) + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; + else + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; + } + + dev->ib_dev.create_qp = mthca_create_qp; + dev->ib_dev.modify_qp = mthca_modify_qp; + dev->ib_dev.destroy_qp = mthca_destroy_qp; + dev->ib_dev.create_cq = mthca_create_cq; + dev->ib_dev.destroy_cq = mthca_destroy_cq; + dev->ib_dev.poll_cq = mthca_poll_cq; + dev->ib_dev.get_dma_mr = mthca_get_dma_mr; + dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; + dev->ib_dev.reg_user_mr = mthca_reg_user_mr; + dev->ib_dev.dereg_mr = mthca_dereg_mr; + + if (dev->mthca_flags & MTHCA_FLAG_FMR) { + dev->ib_dev.alloc_fmr = mthca_alloc_fmr; + dev->ib_dev.unmap_fmr = mthca_unmap_fmr; + dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr; + if (mthca_is_memfree(dev)) + dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr; + else + dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr; + } + + dev->ib_dev.attach_mcast = mthca_multicast_attach; + dev->ib_dev.detach_mcast = mthca_multicast_detach; + dev->ib_dev.process_mad = mthca_process_mad; + + if (mthca_is_memfree(dev)) { + dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq; + dev->ib_dev.post_send = mthca_arbel_post_send; + dev->ib_dev.post_recv = mthca_arbel_post_receive; + } else { + dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq; + dev->ib_dev.post_send = mthca_tavor_post_send; + dev->ib_dev.post_recv = mthca_tavor_post_receive; + } + + KeInitializeMutex(&dev->cap_mask_mutex, 0); + + ret = ib_register_device(&dev->ib_dev); + if (ret) + return ret; + +#ifdef LINUX_TO_BE_REMOVED + for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) { + ret = class_device_create_file(&dev->ib_dev.class_dev, + mthca_class_attributes[i]); + if (ret) { + ib_unregister_device(&dev->ib_dev); + return ret; + } + } +#endif + + return 0; +} + +void mthca_unregister_device(struct mthca_dev *dev) +{ + ib_unregister_device(&dev->ib_dev); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_provider.h b/branches/MTHCA/hw/mthca/kernel/mthca_provider.h new file mode 100644 index 00000000..d2f9286c --- /dev/null +++ 
b/branches/MTHCA/hw/mthca/kernel/mthca_provider.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_provider.h 3047 2005-08-10 03:59:35Z roland $ + */ + +#ifndef MTHCA_PROVIDER_H +#define MTHCA_PROVIDER_H + +#include +#include + +#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) +#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) +#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12) +#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11) +#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10) + +struct mthca_buf_list { + u8 *buf; + dma_addr_t mapping; +}; + +union mthca_buf { + struct mthca_buf_list direct; + struct mthca_buf_list *page_list; +}; + +struct mthca_uar { + unsigned long pfn; + int index; +}; + +struct mthca_user_db_table; + +struct mthca_ucontext { + struct ib_ucontext ibucontext; + struct mthca_uar uar; + struct mthca_user_db_table *db_tab; +}; + +struct mthca_mtt; + +struct mthca_mr { + //NB: the start of this structure is to be equal to mlnx_mro_t ! 
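+	//NB: presumably so that code can cast between struct mthca_mr and
+	//    mlnx_mro_t; keep the leading fields layout-compatible when
+	//    changing either type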
+	//NB: the rest of the structure was not inserted here, so as not to mix driver and provider structures
+	struct ib_mr ibmr;
+	mt_iobuf_t iobuf;
+	struct mthca_mtt *mtt;
+};
+
+struct mthca_fmr {
+	struct ib_fmr ibmr;
+	struct ib_fmr_attr attr;
+	struct mthca_mtt *mtt;
+	int maps;
+	union {
+		struct {
+			struct mthca_mpt_entry __iomem *mpt;
+			u64 __iomem *mtts;
+		} tavor;
+		struct {
+			struct mthca_mpt_entry *mpt;
+			__be64 *mtts;
+		} arbel;
+	} mem;
+};
+
+struct mthca_pd {
+	struct ib_pd ibpd;
+	u32 pd_num;
+	atomic_t sqp_count;
+	struct mthca_mr ntmr;
+	int privileged;
+};
+
+struct mthca_eq {
+	struct mthca_dev *dev;
+	int eqn;
+	u32 eqn_mask;
+	u32 cons_index;
+	u16 msi_x_vector;
+	u16 msi_x_entry;
+	int have_irq;
+	int nent;
+	struct mthca_buf_list *page_list;
+	struct mthca_mr mr;
+	KDPC dpc;		/* DPC for MSI-X interrupts */
+	spinlock_t lock;	/* spinlock for simultaneous DPCs */
+};
+
+struct mthca_av;
+
+enum mthca_ah_type {
+	MTHCA_AH_ON_HCA,
+	MTHCA_AH_PCI_POOL,
+	MTHCA_AH_KMALLOC
+};
+
+struct mthca_ah {
+	struct ib_ah ibah;
+	enum mthca_ah_type type;
+	u32 key;
+	struct mthca_av *av;
+	dma_addr_t avdma;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table. Each
+ * struct mthca_cq/qp also has its own lock. An individual qp lock
+ * may be taken inside of an individual cq lock. Both cqs attached to
+ * a qp may be locked, with the send cq locked first. No other
+ * nesting should be done.
+ *
+ * Each struct mthca_cq/qp also has an atomic_t ref count. The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference is also good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed. Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
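+ * (mthca_qp_event() in mthca_qp.c is one example of this pattern: it
+ * looks the QP up under qp_table.lock, takes a reference, drops the
+ * table lock, runs the event handler, and wakes up waiters once the
+ * reference count drops to zero.)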
+ * + * Possible optimizations (wait for profile data to see if/where we + * have locks bouncing between CPUs): + * - split cq/qp table lock into n separate (cache-aligned) locks, + * indexed (say) by the page in the table + * - split QP struct lock into three (one for common info, one for the + * send queue and one for the receive queue) + */ +//TODO: check correctness of the above requirement: "It is the consumer's responsibilty to make sure that no QP +// operations (WQE posting or state modification) are pending when the QP is destroyed" + +struct mthca_cq { + struct ib_cq ibcq; + void *cq_context; // leo: for IBAL shim + spinlock_t lock; + atomic_t refcount; + int cqn; + u32 cons_index; + int is_direct; + int is_kernel; + + /* Next fields are Arbel only */ + int set_ci_db_index; + __be32 *set_ci_db; + int arm_db_index; + __be32 *arm_db; + int arm_sn; + + union mthca_buf queue; + struct mthca_mr mr; + wait_queue_head_t wait; +}; + +struct mthca_srq { + struct ib_srq ibsrq; + spinlock_t lock; + atomic_t refcount; + int srqn; + int max; + int max_gs; + int wqe_shift; + int first_free; + int last_free; + u16 counter; /* Arbel only */ + int db_index; /* Arbel only */ + __be32 *db; /* Arbel only */ + void *last; + + int is_direct; + u64 *wrid; + union mthca_buf queue; + struct mthca_mr mr; + + wait_queue_head_t wait; +}; + +struct mthca_wq { + spinlock_t lock; + int max; + unsigned next_ind; + unsigned last_comp; + unsigned head; + unsigned tail; + void *last; + int max_gs; + int wqe_shift; + + int db_index; /* Arbel only */ + __be32 *db; +}; + +struct mthca_qp { + struct ib_qp ibqp; + void *qp_context; // leo: for IBAL shim + atomic_t refcount; + u32 qpn; + int is_direct; + u8 transport; + u8 state; + u8 atomic_rd_en; + u8 resp_depth; + + struct mthca_mr mr; + + struct mthca_wq rq; + struct mthca_wq sq; + enum ib_sig_type sq_policy; + int send_wqe_offset; + + u64 *wrid; + union mthca_buf queue; + + wait_queue_head_t wait; +}; + +struct mthca_sqp { + struct mthca_qp qp; + int port; + int pkey_index; + u32 qkey; + u32 send_psn; + struct ib_ud_header ud_header; + int header_buf_size; + void *header_buf; + dma_addr_t header_dma; +}; + +static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct mthca_ucontext, ibucontext); +} + +static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr) +{ + return container_of(ibmr, struct mthca_fmr, ibmr); +} + +static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct mthca_mr, ibmr); +} + +static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct mthca_pd, ibpd); +} + +static inline struct mthca_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mthca_ah, ibah); +} + +static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct mthca_cq, ibcq); +} + +static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mthca_srq, ibsrq); +} + +static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct mthca_qp, ibqp); +} + +static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) +{ + return container_of(qp, struct mthca_sqp, qp); +} + +static inline int start_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 0 : 1; +} + +static inline int end_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 
0 : device->phys_port_cnt; +} + + + +// API +int mthca_query_device(struct ib_device *ibdev, + struct ib_device_attr *props); + +int mthca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props); + +int mthca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props); + +int mthca_query_pkey(struct ib_device *ibdev, + u8 port, u16 index, u16 *pkey); + +int mthca_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid); + +struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata); + +int mthca_dealloc_ucontext(struct ib_ucontext *context); + +struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); + +int mthca_dealloc_pd(struct ib_pd *pd); + +struct ib_ah *mthca_ah_create(struct ib_pd *pd, + struct ib_ah_attr *ah_attr); + +int mthca_ah_destroy(struct ib_ah *ah); + +struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); + +int mthca_destroy_srq(struct ib_srq *srq); + +struct ib_qp *mthca_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); + +int mthca_destroy_qp(struct ib_qp *qp); + +struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, + struct ib_ucontext *context, + struct ib_udata *udata); + +int mthca_destroy_cq(struct ib_cq *cq); + +struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc); + +struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, + int acc, + u64 *iova_start); + +struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, + int acc, struct ib_udata *udata); + +int mthca_dereg_mr(struct ib_mr *mr); + +struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + +int mthca_dealloc_fmr(struct ib_fmr *fmr); + +int mthca_unmap_fmr(struct list_head *fmr_list); + + +#endif /* MTHCA_PROVIDER_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_qp.c b/branches/MTHCA/hw/mthca/kernel/mthca_qp.c new file mode 100644 index 00000000..2ba3886b --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_qp.c @@ -0,0 +1,2025 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_qp.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include +#include +#include + +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + +enum { + MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, + MTHCA_ACK_REQ_FREQ = 10, + MTHCA_FLIGHT_LIMIT = 9, + MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ + MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ + MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ +}; + +enum { + MTHCA_QP_STATE_RST = 0, + MTHCA_QP_STATE_INIT = 1, + MTHCA_QP_STATE_RTR = 2, + MTHCA_QP_STATE_RTS = 3, + MTHCA_QP_STATE_SQE = 4, + MTHCA_QP_STATE_SQD = 5, + MTHCA_QP_STATE_ERR = 6, + MTHCA_QP_STATE_DRAINING = 7 +}; + +enum { + MTHCA_QP_ST_RC = 0x0, + MTHCA_QP_ST_UC = 0x1, + MTHCA_QP_ST_RD = 0x2, + MTHCA_QP_ST_UD = 0x3, + MTHCA_QP_ST_MLX = 0x7 +}; + +enum { + MTHCA_QP_PM_MIGRATED = 0x3, + MTHCA_QP_PM_ARMED = 0x0, + MTHCA_QP_PM_REARM = 0x1 +}; + +enum { + /* qp_context flags */ + MTHCA_QP_BIT_DE = 1 << 8, + /* params1 */ + MTHCA_QP_BIT_SRE = 1 << 15, + MTHCA_QP_BIT_SWE = 1 << 14, + MTHCA_QP_BIT_SAE = 1 << 13, + MTHCA_QP_BIT_SIC = 1 << 4, + MTHCA_QP_BIT_SSC = 1 << 3, + /* params2 */ + MTHCA_QP_BIT_RRE = 1 << 15, + MTHCA_QP_BIT_RWE = 1 << 14, + MTHCA_QP_BIT_RAE = 1 << 13, + MTHCA_QP_BIT_RIC = 1 << 4, + MTHCA_QP_BIT_RSC = 1 << 3 +}; + +#pragma pack(push,1) +struct mthca_qp_path { + __be32 port_pkey; + u8 rnr_retry; + u8 g_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 rgid[16]; +} __attribute__((packed)); + +struct mthca_qp_context { + __be32 flags; + __be32 tavor_sched_queue; /* Reserved on Arbel */ + u8 mtu_msgmax; + u8 rq_size_stride; /* Reserved on Tavor */ + u8 sq_size_stride; /* Reserved on Tavor */ + u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + u32 reserved1[2]; + struct mthca_qp_path pri_path; + struct mthca_qp_path alt_path; + __be32 rdd; + __be32 pd; + __be32 wqe_base; + __be32 wqe_lkey; + __be32 params1; + __be32 reserved2; + __be32 next_send_psn; + __be32 cqn_snd; + __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ + __be32 snd_db_index; /* (debugging only entries) */ + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 ra_buff_indx; + __be32 cqn_rcv; + __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ + __be32 rcv_db_index; /* (debugging only entries) */ + __be32 qkey; + __be32 srqn; + __be32 rmsn; + __be16 rq_wqe_counter; /* reserved on Tavor */ + __be16 sq_wqe_counter; /* reserved on Tavor */ + u32 reserved3[18]; +} __attribute__((packed)); + +struct mthca_qp_param { + __be32 opt_param_mask; + u32 reserved1; + struct mthca_qp_context context; + u32 reserved2[62]; +} __attribute__((packed)); +#pragma pack(pop) + +enum { + MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MTHCA_QP_OPTPAR_RRE = 1 << 1, + MTHCA_QP_OPTPAR_RAE = 1 << 2, + MTHCA_QP_OPTPAR_RWE = 1 << 3, + MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, + 
MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
+	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
+	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
+	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
+	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
+	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
+	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
+	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
+	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
+	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
+	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
+	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
+};
+
+static const u8 mthca_opcode[] = {
+	MTHCA_OPCODE_RDMA_WRITE,
+	MTHCA_OPCODE_RDMA_WRITE_IMM,
+	MTHCA_OPCODE_SEND,
+	MTHCA_OPCODE_SEND_IMM,
+	MTHCA_OPCODE_RDMA_READ,
+	MTHCA_OPCODE_ATOMIC_CS,
+	MTHCA_OPCODE_ATOMIC_FA
+};
+
+
+//TODO: these literals are also defined in ib_types.h, where they have DIFFERENT values!
+enum ib_qp_state {
+	IBQPS_RESET,
+	IBQPS_INIT,
+	IBQPS_RTR,
+	IBQPS_RTS,
+	IBQPS_SQD,
+	IBQPS_SQE,
+	IBQPS_ERR
+};
+
+enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
+
+static struct _state_table {
+	int trans;
+	u32 req_param[NUM_TRANS];
+	u32 opt_param[NUM_TRANS];
+} state_table[IBQPS_ERR + 1][IBQPS_ERR + 1] = {0};
+
+static void fill_state_table()
+{
+	struct _state_table *t;
+	RtlZeroMemory(state_table, sizeof(state_table));
+
+	/* IBQPS_RESET */
+	t = &state_table[IBQPS_RESET][0];
+	t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST;
+	t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR;
+
+	t[IBQPS_INIT].trans = MTHCA_TRANS_RST2INIT;
+	t[IBQPS_INIT].req_param[UD] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY;
+	t[IBQPS_INIT].req_param[UC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_INIT].req_param[RC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_INIT].req_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+	t[IBQPS_INIT].opt_param[MLX] = IB_QP_PORT;
+
+	/* IBQPS_INIT */
+	t = &state_table[IBQPS_INIT][0];
+	t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST;
+	t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR;
+
+	t[IBQPS_INIT].trans = MTHCA_TRANS_INIT2INIT;
+	t[IBQPS_INIT].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY;
+	t[IBQPS_INIT].opt_param[UC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_INIT].opt_param[RC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_INIT].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+
+	t[IBQPS_RTR].trans = MTHCA_TRANS_INIT2RTR;
+	t[IBQPS_RTR].req_param[UC] =
+		IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN |IB_QP_MAX_DEST_RD_ATOMIC;
+	t[IBQPS_RTR].req_param[RC] =
+		IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_MIN_RNR_TIMER;
+	t[IBQPS_RTR].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+	t[IBQPS_RTR].opt_param[UC] = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_RTR].opt_param[RC] = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS;
+	t[IBQPS_RTR].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY;
+
+	/* IBQPS_RTR */
+	t = &state_table[IBQPS_RTR][0];
+	t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST;
+	t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR;
+
+	t[IBQPS_RTS].trans = MTHCA_TRANS_RTR2RTS;
+	t[IBQPS_RTS].req_param[UD] = IB_QP_SQ_PSN;
+	t[IBQPS_RTS].req_param[UC] = IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC;
+	t[IBQPS_RTS].req_param[RC] =
+		IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY |IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC;
+	t[IBQPS_RTS].req_param[MLX] = IB_QP_SQ_PSN;
+	t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY;
+	t[IBQPS_RTS].opt_param[UC] =
+		IB_QP_CUR_STATE |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_PATH_MIG_STATE;
+	t[IBQPS_RTS].opt_param[RC] =
IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + /* IBQPS_RTS */ + t = &state_table[IBQPS_RTS][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_RTS2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_ACCESS_FLAGS |IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_ACCESS_FLAGS | + IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE |IB_QP_MIN_RNR_TIMER; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + t[IBQPS_SQD].trans = MTHCA_TRANS_RTS2SQD; + + /* IBQPS_SQD */ + t = &state_table[IBQPS_SQD][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_SQD2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_CUR_STATE | + IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + t[IBQPS_SQD].trans = MTHCA_TRANS_SQD2SQD; + t[IBQPS_SQD].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + t[IBQPS_SQD].opt_param[UC] = IB_QP_AV |IB_QP_MAX_QP_RD_ATOMIC |IB_QP_MAX_DEST_RD_ATOMIC | + IB_QP_CUR_STATE |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_PATH_MIG_STATE; + t[IBQPS_SQD].opt_param[RC] = IB_QP_AV |IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY | + IB_QP_MAX_QP_RD_ATOMIC |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_SQD].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + + /* IBQPS_SQE */ + t = &state_table[IBQPS_SQE][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_SQERR2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_CUR_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_CUR_STATE |IB_QP_MIN_RNR_TIMER; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + /* IBQPS_ERR */ + t = &state_table[IBQPS_ERR][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + +}; + + +static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) +{ + return qp->qpn >= (u32)dev->qp_table.sqp_start && + qp->qpn <= (u32)dev->qp_table.sqp_start + 3; +} + +static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) +{ + return qp->qpn >= (u32)dev->qp_table.sqp_start && + qp->qpn <= (u32)(dev->qp_table.sqp_start + 1); +} + +static void *get_recv_wqe(struct mthca_qp *qp, int n) +{ + if (qp->is_direct) + return qp->queue.direct.buf + (n << qp->rq.wqe_shift); + else + return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + + ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); +} + +static void *get_send_wqe(struct mthca_qp *qp, int n) +{ + if (qp->is_direct) + return qp->queue.direct.buf + qp->send_wqe_offset + + (n << qp->sq.wqe_shift); + else + return qp->queue.page_list[(qp->send_wqe_offset + + (n << qp->sq.wqe_shift)) >> + PAGE_SHIFT].buf + + ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & + (PAGE_SIZE - 1)); +} + +void mthca_qp_event(struct mthca_dev *dev, u32 qpn, + enum ib_event_type event_type) +{ + struct mthca_qp 
*qp; + struct ib_event event; + + spin_lock(&dev->qp_table.lock); + qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); + if (qp) + atomic_inc(&qp->refcount); + spin_unlock(&dev->qp_table.lock); + + if (!qp) { + mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); + return; + } + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.qp = &qp->ibqp; + if (qp->ibqp.event_handler) + qp->ibqp.event_handler(&event, qp->ibqp.qp_context); + + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); +} + +static int to_mthca_state(enum ib_qp_state ib_state) +{ + switch (ib_state) { + case IBQPS_RESET: return MTHCA_QP_STATE_RST; + case IBQPS_INIT: return MTHCA_QP_STATE_INIT; + case IBQPS_RTR: return MTHCA_QP_STATE_RTR; + case IBQPS_RTS: return MTHCA_QP_STATE_RTS; + case IBQPS_SQD: return MTHCA_QP_STATE_SQD; + case IBQPS_SQE: return MTHCA_QP_STATE_SQE; + case IBQPS_ERR: return MTHCA_QP_STATE_ERR; + default: return -1; + } +} + +static int to_mthca_st(int transport) +{ + switch (transport) { + case RC: return MTHCA_QP_ST_RC; + case UC: return MTHCA_QP_ST_UC; + case UD: return MTHCA_QP_ST_UD; + case RD: return MTHCA_QP_ST_RD; + case MLX: return MTHCA_QP_ST_MLX; + default: return -1; + } +} + +static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr, + int attr_mask) +{ + if (attr_mask & IB_QP_PKEY_INDEX) + sqp->pkey_index = attr->pkey_index; + if (attr_mask & IB_QP_QKEY) + sqp->qkey = attr->qkey; + if (attr_mask & IB_QP_SQ_PSN) + sqp->send_psn = attr->sq_psn; +} + +static void init_port(struct mthca_dev *dev, int port) +{ + int err; + u8 status; + struct mthca_init_ib_param param; + + RtlZeroMemory(¶m, sizeof param); + + param.enable_1x = 1; + param.enable_4x = 1; + param.vl_cap = dev->limits.vl_cap; + param.mtu_cap = dev->limits.mtu_cap; + param.gid_cap = (u16)dev->limits.gid_table_len; + param.pkey_cap = (u16)dev->limits.pkey_table_len; + + err = mthca_INIT_IB(dev, ¶m, port, &status); + if (err) + mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); + if (status) + mthca_warn(dev, "INIT_IB returned status %02x.\n", status); +} + +int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + enum ib_qp_state cur_state, new_state; + struct mthca_mailbox *mailbox; + struct mthca_qp_param *qp_param; + struct mthca_qp_context *qp_context; + u32 req_param, opt_param; + u8 status; + int err; + + if (attr_mask & IB_QP_CUR_STATE) { + if (attr->cur_qp_state != IBQPS_RTR && + attr->cur_qp_state != IBQPS_RTS && + attr->cur_qp_state != IBQPS_SQD && + attr->cur_qp_state != IBQPS_SQE) + return -EINVAL; + else + cur_state = attr->cur_qp_state; + } else { + spin_lock_irq(&qp->sq.lock); + spin_lock(&qp->rq.lock); + cur_state = qp->state; + spin_unlock(&qp->rq.lock); + spin_unlock_irq(&qp->sq.lock); + } + + if (attr_mask & IB_QP_STATE) { + if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR) + return -EINVAL; + new_state = attr->qp_state; + } else + new_state = cur_state; + + if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) { + mthca_dbg(dev, "Illegal QP transition " + "%d->%d\n", cur_state, new_state); + return -EINVAL; + } + + req_param = state_table[cur_state][new_state].req_param[qp->transport]; + opt_param = state_table[cur_state][new_state].opt_param[qp->transport]; + + if ((req_param & attr_mask) != req_param) { + mthca_dbg(dev, "QP transition " + "%d->%d missing req attr 0x%08x\n", + cur_state, new_state, + 
req_param & ~attr_mask); + return -EINVAL; + } + + if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) { + mthca_dbg(dev, "QP transition (transport %d) " + "%d->%d has extra attr 0x%08x\n", + qp->transport, + cur_state, new_state, + attr_mask & ~(req_param | opt_param | + IB_QP_STATE)); + return -EINVAL; + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + qp_param = mailbox->buf; + qp_context = &qp_param->context; + RtlZeroMemory(qp_param, sizeof *qp_param); + + qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | + (to_mthca_st(qp->transport) << 16)); + qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); + if (!(attr_mask & IB_QP_PATH_MIG_STATE)) + qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); + else { + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); + switch (attr->path_mig_state) { + case IB_MIG_MIGRATED: + qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); + break; + case IB_MIG_REARM: + qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); + break; + case IB_MIG_ARMED: + qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); + break; + } + } + + /* leave tavor_sched_queue as 0 */ + + if (qp->transport == MLX || qp->transport == UD) + qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; + else if (attr_mask & IB_QP_PATH_MTU) + qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; + + if (mthca_is_memfree(dev)) { + if (qp->rq.max) + qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; + + if (qp->sq.max) + qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; + } + + /* leave arbel_sched_queue as 0 */ + + if (qp->ibqp.uobject) + qp_context->usr_page = + cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); + else + qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); + qp_context->local_qpn = cpu_to_be32(qp->qpn); + if (attr_mask & IB_QP_DEST_QPN) { + qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); + } + + if (qp->transport == MLX) + qp_context->pri_path.port_pkey |= + cpu_to_be32(to_msqp(qp)->port << 24); + else { + if (attr_mask & IB_QP_PORT) { + qp_context->pri_path.port_pkey |= + cpu_to_be32(attr->port_num << 24); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + qp_context->pri_path.port_pkey |= + cpu_to_be32(attr->pkey_index); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY); + } + + if (attr_mask & IB_QP_AV) { + qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f; + qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid); + qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate; + if (attr->ah_attr.ah_flags & IB_AH_GRH) { + qp_context->pri_path.g_mylmc |= 1 << 7; + qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index; + qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit; + qp_context->pri_path.sl_tclass_flowlabel = + cpu_to_be32((attr->ah_attr.sl << 28) | + (attr->ah_attr.grh.traffic_class << 20) | + (attr->ah_attr.grh.flow_label)); + memcpy(qp_context->pri_path.rgid, + attr->ah_attr.grh.dgid.raw, 16); + } else { + qp_context->pri_path.sl_tclass_flowlabel = + cpu_to_be32(attr->ah_attr.sl << 28); + } + 
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); + } + + if (attr_mask & IB_QP_TIMEOUT) { + qp_context->pri_path.ackto = attr->timeout; + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); + } + + /* XXX alt_path */ + + /* leave rdd as 0 */ + qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); + /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ + qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); + qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | + (MTHCA_FLIGHT_LIMIT << 24) | + MTHCA_QP_BIT_SRE | + MTHCA_QP_BIT_SWE | + MTHCA_QP_BIT_SAE); + if (qp->sq_policy == IB_SIGNAL_ALL_WR) + qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); + if (attr_mask & IB_QP_RETRY_CNT) { + qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + long val = attr->max_rd_atomic; + qp_context->params1 |= cpu_to_be32(min(val ? ffs(val) - 1 : 0, 7) << 21); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); + } + + if (attr_mask & IB_QP_SQ_PSN) + qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); + qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); + + if (mthca_is_memfree(dev)) { + qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); + qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); + } + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + /* + * Only enable RDMA/atomics if we have responder + * resources set to a non-zero value. + */ + if (qp->resp_depth) { + qp_context->params2 |= + cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ? + MTHCA_QP_BIT_RWE : 0); + qp_context->params2 |= + cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ? + MTHCA_QP_BIT_RRE : 0); + qp_context->params2 |= + cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ? + MTHCA_QP_BIT_RAE : 0); + } + + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | + MTHCA_QP_OPTPAR_RRE | + MTHCA_QP_OPTPAR_RAE); + + qp->atomic_rd_en = (u8)attr->qp_access_flags; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + u8 rra_max; + + if (qp->resp_depth && !attr->max_dest_rd_atomic) { + /* + * Lowering our responder resources to zero. + * Turn off RDMA/atomics as responder. + * (RWE/RRE/RAE in params2 already zero) + */ + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | + MTHCA_QP_OPTPAR_RRE | + MTHCA_QP_OPTPAR_RAE); + } + + if (!qp->resp_depth && attr->max_dest_rd_atomic) { + /* + * Increasing our responder resources from + * zero. Turn on RDMA/atomics as appropriate. + */ + qp_context->params2 |= + cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ? + MTHCA_QP_BIT_RWE : 0); + qp_context->params2 |= + cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ? + MTHCA_QP_BIT_RRE : 0); + qp_context->params2 |= + cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ? 
+ MTHCA_QP_BIT_RAE : 0); + + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | + MTHCA_QP_OPTPAR_RRE | + MTHCA_QP_OPTPAR_RAE); + } + + for (rra_max = 0; + 1 << rra_max < attr->max_dest_rd_atomic && + rra_max < dev->qp_table.rdb_shift; + ++rra_max) + ; /* nothing */ + + qp_context->params2 |= cpu_to_be32(rra_max << 21); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); + + qp->resp_depth = attr->max_dest_rd_atomic; + } + + qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); + + if (ibqp->srq) + qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); + } + if (attr_mask & IB_QP_RQ_PSN) + qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); + + qp_context->ra_buff_indx = + cpu_to_be32(dev->qp_table.rdb_base + + ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << + dev->qp_table.rdb_shift)); + + qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); + + if (mthca_is_memfree(dev)) + qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); + + if (attr_mask & IB_QP_QKEY) { + qp_context->qkey = cpu_to_be32(attr->qkey); + qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); + } + + if (ibqp->srq) + qp_context->srqn = cpu_to_be32(1 << 24 | + to_msrq(ibqp->srq)->srqn); + + err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, + qp->qpn, 0, mailbox, 0, &status); + if (status) { + mthca_warn(dev, "modify QP %d returned status %02x.\n", + state_table[cur_state][new_state].trans, status); + err = -EINVAL; + } + + if (!err) + qp->state = new_state; + + mthca_free_mailbox(dev, mailbox); + + if (is_sqp(dev, qp)) + store_attrs(to_msqp(qp), attr, attr_mask); + + /* + * If we are moving QP0 to RTR, bring the IB link up; if we + * are moving QP0 to RESET or ERROR, bring the link back down. + */ + if (is_qp0(dev, qp)) { + if (cur_state != IBQPS_RTR && + new_state == IBQPS_RTR) + init_port(dev, to_msqp(qp)->port); + + if (cur_state != IBQPS_RESET && + cur_state != IBQPS_ERR && + (new_state == IBQPS_RESET || + new_state == IBQPS_ERR)) + mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status); + } + + return err; +} + +/* + * Allocate and register buffer for WQEs. qp->rq.max, sq.max, + * rq.max_gs and sq.max_gs must all be assigned. 
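+ * (mthca_set_qp_size() below fills these in from the caller's
+ * ib_qp_cap before the queues are allocated.)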
+ * mthca_alloc_wqe_buf will calculate rq.wqe_shift and + * sq.wqe_shift (as well as send_wqe_offset, is_direct, and + * queue) + */ +static int mthca_alloc_wqe_buf(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_qp *qp) +{ + int size; + int err = -ENOMEM; + + size = sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg); + + for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; + qp->rq.wqe_shift++) + ; /* nothing */ + + size = sizeof (struct mthca_next_seg) + + qp->sq.max_gs * sizeof (struct mthca_data_seg); + switch (qp->transport) { + case MLX: + size += 2 * sizeof (struct mthca_data_seg); + break; + case UD: + if (mthca_is_memfree(dev)) + size += sizeof (struct mthca_arbel_ud_seg); + else + size += sizeof (struct mthca_tavor_ud_seg); + break; + default: + /* bind seg is as big as atomic + raddr segs */ + size += sizeof (struct mthca_bind_seg); + } + + for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; + qp->sq.wqe_shift++) + ; /* nothing */ + + qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, + 1 << qp->sq.wqe_shift); + + /* + * If this is a userspace QP, we don't actually have to + * allocate anything. All we need is to calculate the WQE + * sizes and the send_wqe_offset, so we're done now. + */ + if (pd->ibpd.uobject) + return 0; + + size = (int)(LONG_PTR)PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)); + + qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), + GFP_KERNEL); + if (!qp->wrid) + goto err_out; + + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, + &qp->queue, &qp->is_direct, pd, 0, &qp->mr); + if (err) + goto err_out; + + return 0; + +err_out: + kfree(qp->wrid); + return err; +} + +static void mthca_free_wqe_buf(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + mthca_buf_free(dev, (int)(LONG_PTR)PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)), + &qp->queue, qp->is_direct, &qp->mr); + kfree(qp->wrid); +} + +static int mthca_map_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + int ret; + + if (mthca_is_memfree(dev)) { + ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); + if (ret) + return ret; + + ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); + if (ret) + goto err_qpc; + + ret = mthca_table_get(dev, dev->qp_table.rdb_table, + qp->qpn << dev->qp_table.rdb_shift); + if (ret) + goto err_eqpc; + + } + + return 0; + +err_eqpc: + mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); + +err_qpc: + mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); + + return ret; +} + +static void mthca_unmap_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + mthca_table_put(dev, dev->qp_table.rdb_table, + qp->qpn << dev->qp_table.rdb_shift); + mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); + mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); +} + +static int mthca_alloc_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + int ret = 0; + + if (mthca_is_memfree(dev)) { + qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, + qp->qpn, &qp->rq.db); + if (qp->rq.db_index < 0) + return ret; + + qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, + qp->qpn, &qp->sq.db); + if (qp->sq.db_index < 0) + mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); + } + + return ret; +} + +static void mthca_free_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + if (mthca_is_memfree(dev)) { + mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); + mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); + } +} + 
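+/*
+ * For illustration (assuming the 16-byte mthca_next_seg and
+ * mthca_data_seg layouts from mthca_wqe.h): a receive queue with
+ * rq.max_gs = 4 needs 16 + 4 * 16 = 80 bytes per WQE, so
+ * mthca_alloc_wqe_buf() above rounds the stride up to the next
+ * power of two, 128 bytes, i.e. rq.wqe_shift = 7.
+ */
+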
+static void mthca_wq_init(struct mthca_wq* wq) +{ + spin_lock_init(&wq->lock); + wq->next_ind = 0; + wq->last_comp = wq->max - 1; + wq->head = 0; + wq->tail = 0; +} + +static int mthca_alloc_qp_common(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct mthca_qp *qp) +{ + int ret; + int i; + + atomic_set(&qp->refcount, 1); + qp->state = IBQPS_RESET; + qp->atomic_rd_en = 0; + qp->resp_depth = 0; + qp->sq_policy = send_policy; + mthca_wq_init(&qp->sq); + mthca_wq_init(&qp->rq); + /*leo: seems like was missed */ + init_waitqueue_head(&qp->wait); + + ret = mthca_map_memfree(dev, qp); + if (ret) + return ret; + + ret = mthca_alloc_wqe_buf(dev, pd, qp); + if (ret) { + mthca_unmap_memfree(dev, qp); + return ret; + } + + /* + * If this is a userspace QP, we're done now. The doorbells + * will be allocated and buffers will be initialized in + * userspace. + */ + if (pd->ibpd.uobject) + return 0; + + ret = mthca_alloc_memfree(dev, qp); + if (ret) { + mthca_free_wqe_buf(dev, qp); + mthca_unmap_memfree(dev, qp); + return ret; + } + + if (mthca_is_memfree(dev)) { + struct mthca_next_seg *next; + struct mthca_data_seg *scatter; + int size = (sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; + + for (i = 0; i < qp->rq.max; ++i) { + next = get_recv_wqe(qp, i); + next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << + qp->rq.wqe_shift); + next->ee_nds = cpu_to_be32(size); + + for (scatter = (void *) (next + 1); + (void *) scatter < (void *) ((u8*)next + (1 << qp->rq.wqe_shift)); + ++scatter) + scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + } + + for (i = 0; i < qp->sq.max; ++i) { + next = get_send_wqe(qp, i); + next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << + qp->sq.wqe_shift) + + qp->send_wqe_offset); + } + } + + qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); + qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); + + return 0; +} + +static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, + struct mthca_qp *qp) +{ + /* Sanity check QP size before proceeding */ + if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 || + cap->max_send_sge > 64 || cap->max_recv_sge > 64) + return -EINVAL; + + if (mthca_is_memfree(dev)) { + qp->rq.max = cap->max_recv_wr ? + roundup_pow_of_two(cap->max_recv_wr) : 0; + qp->sq.max = cap->max_send_wr ? 
+ roundup_pow_of_two(cap->max_send_wr) : 0; + } else { + qp->rq.max = cap->max_recv_wr; + qp->sq.max = cap->max_send_wr; + } + + qp->rq.max_gs = cap->max_recv_sge; + qp->sq.max_gs = MAX(cap->max_send_sge, + ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, + MTHCA_INLINE_CHUNK_SIZE) / + sizeof (struct mthca_data_seg)); + + /* + * For MLX transport we need 2 extra S/G entries: + * one for the header and one for the checksum at the end + */ + if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) || + qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg) + return -EINVAL; + + return 0; +} + +int mthca_alloc_qp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_qp_type_t type, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + struct mthca_qp *qp) +{ + int err; + + err = mthca_set_qp_size(dev, cap, qp); + if (err) + return err; + + switch (type) { + case IB_QPT_RELIABLE_CONN: qp->transport = RC; break; + case IB_QPT_UNRELIABLE_CONN: qp->transport = UC; break; + case IB_QPT_UNRELIABLE_DGRM: qp->transport = UD; break; + default: return -EINVAL; + } + + qp->qpn = mthca_alloc(&dev->qp_table.alloc); + if (qp->qpn == -1) + return -ENOMEM; + + err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, + send_policy, qp); + if (err) { + mthca_free(&dev->qp_table.alloc, qp->qpn); + return err; + } + + spin_lock_irq(&dev->qp_table.lock); + mthca_array_set(&dev->qp_table.qp, + qp->qpn & (dev->limits.num_qps - 1), qp); + spin_unlock_irq(&dev->qp_table.lock); + + return 0; +} + +int mthca_alloc_sqp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + int qpn, + int port, + struct mthca_sqp *sqp) +{ + u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; + int err; + + err = mthca_set_qp_size(dev, cap, &sqp->qp); + if (err) + return err; + + sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; + sqp->header_buf = dma_alloc_coherent(dev, sqp->header_buf_size, + &sqp->header_dma, GFP_KERNEL); + if (!sqp->header_buf) + return -ENOMEM; + + spin_lock_irq(&dev->qp_table.lock); + if (mthca_array_get(&dev->qp_table.qp, mqpn)) + err = -EBUSY; + else + mthca_array_set(&dev->qp_table.qp, mqpn, sqp); + spin_unlock_irq(&dev->qp_table.lock); + + if (err) + goto err_out; + + sqp->port = port; + sqp->qp.qpn = mqpn; + sqp->qp.transport = MLX; + + err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, + send_policy, &sqp->qp); + if (err) + goto err_out_free; + + atomic_inc(&pd->sqp_count); + + return 0; + + err_out_free: + /* + * Lock CQs here, so that CQ polling code can do QP lookup + * without taking a lock. + */ + spin_lock_irq(&send_cq->lock); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock); + + spin_lock(&dev->qp_table.lock); + mthca_array_clear(&dev->qp_table.qp, mqpn); + spin_unlock(&dev->qp_table.lock); + + if (send_cq != recv_cq) + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + + err_out: + dma_free_coherent(dev, sqp->header_buf_size, + sqp->header_buf, sqp->header_dma); + + return err; +} + +void mthca_free_qp(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + u8 status; + struct mthca_cq *send_cq; + struct mthca_cq *recv_cq; + + send_cq = to_mcq(qp->ibqp.send_cq); + recv_cq = to_mcq(qp->ibqp.recv_cq); + + /* + * Lock CQs here, so that CQ polling code can do QP lookup + * without taking a lock. 
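+ * The send CQ lock is taken first, and the receive CQ lock only
+ * when the two CQs differ; mthca_alloc_sqp uses the same order
+ * in its error path.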
+ */ + spin_lock_irq(&send_cq->lock); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock); + + spin_lock(&dev->qp_table.lock); + mthca_array_clear(&dev->qp_table.qp, + qp->qpn & (dev->limits.num_qps - 1)); + spin_unlock(&dev->qp_table.lock); + + if (send_cq != recv_cq) + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + + atomic_dec(&qp->refcount); + wait_event(&qp->wait, !atomic_read(&qp->refcount)); + + if (qp->state != IBQPS_RESET) + mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status); + + /* + * If this is a userspace QP, the buffers, MR, CQs and so on + * will be cleaned up in userspace, so all we have to do is + * unref the mem-free tables and free the QPN in our table. + */ + if (!qp->ibqp.uobject) { + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); + if (qp->ibqp.send_cq != qp->ibqp.recv_cq) + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); + + mthca_free_memfree(dev, qp); + mthca_free_wqe_buf(dev, qp); + } + + mthca_unmap_memfree(dev, qp); + + if (is_sqp(dev, qp)) { + atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); + dma_free_coherent(dev, + to_msqp(qp)->header_buf_size, + to_msqp(qp)->header_buf, + to_msqp(qp)->header_dma); + } else + mthca_free(&dev->qp_table.alloc, qp->qpn); +} + +/* Create UD header for an MLX send and build a data segment for it */ +static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, + int ind, struct ib_send_wr *wr, + struct mthca_mlx_seg *mlx, + struct mthca_data_seg *data) +{ + int header_size; + int err; + u16 pkey; + + ib_ud_header_init(256, /* assume a MAD */ + sqp->ud_header.grh_present, + &sqp->ud_header); + + err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); + if (err) + return err; + mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); + mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | + (sqp->ud_header.lrh.destination_lid == + IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | + (sqp->ud_header.lrh.service_level << 8)); + mlx->rlid = sqp->ud_header.lrh.destination_lid; + mlx->vcrc = 0; + + switch (wr->opcode) { + case IB_WR_SEND: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + sqp->ud_header.immediate_present = 0; + break; + case IB_WR_SEND_WITH_IMM: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + sqp->ud_header.immediate_present = 1; + sqp->ud_header.immediate_data = wr->imm_data; + break; + default: + return -EINVAL; + } + + sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; + sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); + if (!sqp->qp.ibqp.qp_num) + ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port, + sqp->pkey_index, &pkey); + else + ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port, + wr->wr.ud.pkey_index, &pkey); + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); + sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); + sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); + sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
+ sqp->qkey : wr->wr.ud.remote_qkey); + sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); + + header_size = ib_ud_header_pack(&sqp->ud_header, + (u8*)sqp->header_buf + + ind * MTHCA_UD_HEADER_SIZE); + + data->byte_count = cpu_to_be32(header_size); + data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); + data->addr = cpu_to_be64(sqp->header_dma + + ind * MTHCA_UD_HEADER_SIZE); + + return 0; +} + +static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, + struct ib_cq *ib_cq) +{ + unsigned cur; + struct mthca_cq *cq; + + cur = wq->head - wq->tail; + if (likely((int)cur + nreq < wq->max)) + return 0; + + cq = to_mcq(ib_cq); + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return (int)cur + nreq >= wq->max; +} + +int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + u8 *wqe; + u8 *prev_wqe; + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + u32 f0 = 0; + int ind; + u8 op0 = 0; + + spin_lock_irqsave(&qp->sq.lock); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.next_ind; + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { + mthca_err(dev, "SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_flags & IB_SEND_SIGNALED) ? + cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | + ((wr->send_flags & IB_SEND_SOLICITED) ? 
+ cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | + cpu_to_be32(1); + if (wr->opcode == IB_WR_SEND_WITH_IMM || + wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) + ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + switch (qp->transport) { + case RC: + switch (wr->opcode) { + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.atomic.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.atomic.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mthca_raddr_seg); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.swap); + ((struct mthca_atomic_seg *) wqe)->compare = + cpu_to_be64(wr->wr.atomic.compare_add); + } else { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.compare_add); + ((struct mthca_atomic_seg *) wqe)->compare = 0; + } + + wqe += sizeof (struct mthca_atomic_seg); + size += sizeof (struct mthca_raddr_seg) / 16 + + sizeof (struct mthca_atomic_seg); + break; + + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + case IB_WR_RDMA_READ: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case UC: + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case UD: + ((struct mthca_tavor_ud_seg *) wqe)->lkey = + cpu_to_be32(to_mah(wr->wr.ud.ah)->key); + ((struct mthca_tavor_ud_seg *) wqe)->av_addr = + cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); + ((struct mthca_tavor_ud_seg *) wqe)->dqpn = + cpu_to_be32(wr->wr.ud.remote_qpn); + ((struct mthca_tavor_ud_seg *) wqe)->qkey = + cpu_to_be32(wr->wr.ud.remote_qkey); + + wqe += sizeof (struct mthca_tavor_ud_seg); + size += sizeof (struct mthca_tavor_ud_seg) / 16; + break; + + case MLX: + err = build_mlx_header(dev, to_msqp(qp), ind, wr, + (void*)(wqe - sizeof (struct mthca_next_seg)), + (void*)wqe); + if (err) { + *bad_wr = wr; + goto out; + } + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + break; + } + + if (wr->num_sge > qp->sq.max_gs) { + mthca_err(dev, "too many gathers\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + /* Add one more inline data segment for ICRC */ + if (qp->transport == MLX) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32((1 << 
31) | 4); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { + mthca_err(dev, "opcode invalid\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cpu_to_be32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) |mthca_opcode[wr->opcode]); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) |size); + + if (!size0) { + size0 = size; + op0 = mthca_opcode[wr->opcode]; + } + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + } + +out: + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) | f0 | op0); + doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); + + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_SEND_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + qp->sq.next_ind = ind; + qp->sq.head += nreq; + + spin_unlock_irqrestore(&qp->sq.lock); + return err; +} + +int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + int ind; + u8 *wqe; + u8 *prev_wqe; + + spin_lock_irqsave(&qp->rq.lock); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.next_ind; + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + mthca_err(dev, "RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + prev_wqe = qp->rq.last; + qp->rq.last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = + cpu_to_be32(MTHCA_NEXT_DBD); + ((struct mthca_next_seg *) wqe)->flags = 0; + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind] = wr->wr_id; + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cpu_to_be32((ind << qp->rq.wqe_shift) | 1); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cpu_to_be32(MTHCA_NEXT_DBD | size); + + if (!size0) + size0 = size; + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } + +out: + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); + + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECEIVE_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + qp->rq.next_ind = ind; + qp->rq.head += nreq; + + spin_unlock_irqrestore(&qp->rq.lock); + return err; +} + +int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mthca_dev *dev = 
to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + u8 *wqe; + u8 *prev_wqe; + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + u32 f0 = 0; + int ind; + u8 op0 = 0; + + spin_lock_irqsave(&qp->sq.lock); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.head & (qp->sq.max - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { + mthca_err(dev, "SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_flags & IB_SEND_SIGNALED) ? + cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | + ((wr->send_flags & IB_SEND_SOLICITED) ? + cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | + cpu_to_be32(1); + if (wr->opcode == IB_WR_SEND_WITH_IMM || + wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) + ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + switch (qp->transport) { + case RC: + switch (wr->opcode) { + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.atomic.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.atomic.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mthca_raddr_seg); + + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.swap); + ((struct mthca_atomic_seg *) wqe)->compare = + cpu_to_be64(wr->wr.atomic.compare_add); + } else { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cpu_to_be64(wr->wr.atomic.compare_add); + ((struct mthca_atomic_seg *) wqe)->compare = 0; + } + + wqe += sizeof (struct mthca_atomic_seg); + size += sizeof (struct mthca_raddr_seg) / 16 + + sizeof (struct mthca_atomic_seg); + break; + + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case UC: + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cpu_to_be64(wr->wr.rdma.remote_addr); + ((struct mthca_raddr_seg *) wqe)->rkey = + cpu_to_be32(wr->wr.rdma.rkey); + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case UD: + memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, + to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); + ((struct mthca_arbel_ud_seg *) wqe)->dqpn = + cpu_to_be32(wr->wr.ud.remote_qpn); + ((struct mthca_arbel_ud_seg *) wqe)->qkey = + cpu_to_be32(wr->wr.ud.remote_qkey); + + wqe += sizeof (struct mthca_arbel_ud_seg); + size += sizeof (struct mthca_arbel_ud_seg) / 16; + break; + + case MLX: + err = build_mlx_header(dev, to_msqp(qp), ind, wr, + (void*)(wqe - sizeof (struct mthca_next_seg)), + (void*)wqe); + if (err) { + *bad_wr 
= wr; + goto out; + } + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + break; + } + + if (wr->num_sge > qp->sq.max_gs) { + mthca_err(dev, "too many gathers\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + /* Add one more inline data segment for ICRC */ + if (qp->transport == MLX) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32((1 << 31) | 4); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { + mthca_err(dev, "opcode invalid\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cpu_to_be32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) |mthca_opcode[wr->opcode]); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cpu_to_be32(MTHCA_NEXT_DBD | size); + + if (!size0) { + size0 = size; + op0 = mthca_opcode[wr->opcode]; + } + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + } + +out: + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32((nreq << 24) | + ((qp->sq.head & 0xffff) << 8) | + f0 | op0); + doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); + + qp->sq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); + + /* + * Make sure doorbell record is written before we + * write MMIO send doorbell. 
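+ * (The HCA may read the doorbell record from host memory as
+ * soon as the MMIO doorbell arrives, so the record has to be
+ * globally visible first.)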
+ */ + wmb(); + mthca_write64(doorbell, + dev->kar + MTHCA_SEND_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + spin_unlock_irqrestore(&qp->sq.lock); + return err; +} + +int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + int err = 0; + int nreq; + int ind; + int i; + u8 *wqe; + + spin_lock_irqsave(&qp->rq.lock); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.head & (qp->rq.max - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + mthca_err(dev, "RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + + ((struct mthca_next_seg *) wqe)->flags = 0; + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < qp->rq.max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + qp->wrid[ind] = wr->wr_id; + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); + } + + spin_unlock_irqrestore(&qp->rq.lock); + return err; +} + +int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, + int index, int *dbd, __be32 *new_wqe) +{ + struct mthca_next_seg *next; + + /* + * For SRQs, all WQEs generate a CQE, so we're always at the + * end of the doorbell chain. + */ + if (qp->ibqp.srq) { + *new_wqe = 0; + return 0; + } + + if (is_send) + next = get_send_wqe(qp, index); + else + next = get_recv_wqe(qp, index); + + *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); + if (next->ee_nds & cpu_to_be32(0x3f)) + *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | + (next->ee_nds & cpu_to_be32(0x3f)); + else + *new_wqe = 0; + + return 0; +} + +int __devinit mthca_init_qp_table(struct mthca_dev *dev) +{ + int err; + u8 status; + int i; + + spin_lock_init(&dev->qp_table.lock); + fill_state_table(); + + /* + * We reserve 2 extra QPs per port for the special QPs. The + * special QP for port 1 has to be even, so round up. + */ + dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; + err = mthca_alloc_init(&dev->qp_table.alloc, + dev->limits.num_qps, + (1 << 24) - 1, + dev->qp_table.sqp_start + + MTHCA_MAX_PORTS * 2); + if (err) + return err; + + err = mthca_array_init(&dev->qp_table.qp, + dev->limits.num_qps); + if (err) { + mthca_alloc_cleanup(&dev->qp_table.alloc); + return err; + } + + for (i = 0; i < 2; ++i) { + err = mthca_CONF_SPECIAL_QP(dev, i ? 
IB_QPT_QP1 : IB_QPT_QP0, + dev->qp_table.sqp_start + i * 2, + &status); + if (err) + goto err_out; + if (status) { + mthca_warn(dev, "CONF_SPECIAL_QP returned " + "status %02x, aborting.\n", + status); + err = -EINVAL; + goto err_out; + } + } + return 0; + + err_out: + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status); + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status); + + mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); + mthca_alloc_cleanup(&dev->qp_table.alloc); + + return err; +} + +void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) +{ + int i; + u8 status; + + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status); + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status); + + mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); + mthca_alloc_cleanup(&dev->qp_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_reset.c b/branches/MTHCA/hw/mthca/kernel/mthca_reset.c new file mode 100644 index 00000000..66447816 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_reset.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_reset.c 2100 2005-03-31 20:43:01Z roland $ + */ + +#include +#include +#include +#include +#include + +#include "mthca_dev.h" +#include "mthca_cmd.h" + +int mthca_reset(struct mthca_dev *mdev) +{ + int i; + int err = 0; + u32 *hca_header = NULL; + u32 *bridge_header = NULL; + struct pci_dev *bridge = NULL; + +#define MTHCA_RESET_OFFSET 0xf0010 +#define MTHCA_RESET_VALUE swab32(1) + + /* + * Reset the chip. This is somewhat ugly because we have to + * save off the PCI header before reset and then restore it + * after the chip reboots. We skip config space offsets 22 + * and 23 since those have a special meaning. + * + * To make matters worse, for Tavor (PCI-X HCA) we have to + * find the associated bridge device and save off its PCI + * header as well. + */ + + if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) { + /* Look for the bridge -- its device ID will be 2 more + than HCA's device ID. 
*/ + while ((bridge = pci_get_device(mdev->pdev->vendor, + mdev->pdev->device + 2, + bridge)) != NULL) { + if (bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE && + bridge->subordinate == mdev->pdev->bus) { + mthca_dbg(mdev, "Found bridge: %s (%s)\n", + pci_pretty_name(bridge), pci_name(bridge)); + break; + } + } + + if (!bridge) { + /* + * Didn't find a bridge for a Tavor device -- + * assume we're in no-bridge mode and hope for + * the best. + */ + mthca_warn(mdev, "No bridge found for %s (%s)\n", + pci_pretty_name(mdev->pdev), pci_name(mdev->pdev)); + } + + } + + /* For Arbel do we need to save off the full 4K PCI Express header?? */ + hca_header = kmalloc(256, GFP_KERNEL); + if (!hca_header) { + err = -ENOMEM; + mthca_err(mdev, "Couldn't allocate memory to save HCA " + "PCI header, aborting.\n"); + goto out; + } + + for (i = 0; i < 64; ++i) { + if (i == 22 || i == 23) + continue; + if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) { + err = -ENODEV; + mthca_err(mdev, "Couldn't save HCA " + "PCI header, aborting.\n"); + goto out; + } + } + + if (bridge) { + bridge_header = kmalloc(256, GFP_KERNEL); + if (!bridge_header) { + err = -ENOMEM; + mthca_err(mdev, "Couldn't allocate memory to save HCA " + "bridge PCI header, aborting.\n"); + goto out; + } + + for (i = 0; i < 64; ++i) { + if (i == 22 || i == 23) + continue; + if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) { + err = -ENODEV; + mthca_err(mdev, "Couldn't save HCA bridge " + "PCI header, aborting.\n"); + goto out; + } + } + } + + /* actually hit reset */ + { + void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) + + MTHCA_RESET_OFFSET, 4); + + if (!reset) { + err = -ENOMEM; + mthca_err(mdev, "Couldn't map HCA reset register, " + "aborting.\n"); + goto out; + } + + writel(MTHCA_RESET_VALUE, reset); + iounmap(reset); + } + + /* Docs say to wait one second before accessing device */ + msleep(1000); + + /* Now wait for PCI device to start responding again */ + { + u32 v; + int c = 0; + + for (c = 0; c < 100; ++c) { + if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) { + err = -ENODEV; + mthca_err(mdev, "Couldn't access HCA after reset, " + "aborting.\n"); + goto out; + } + + if (v != 0xffffffff) + goto good; + + msleep(100); + } + + err = -ENODEV; + mthca_err(mdev, "PCI device did not come back after reset, " + "aborting.\n"); + goto out; + } + +good: + /* Now restore the PCI headers */ + if (bridge) { + /* + * Bridge control register is at 0x3e, so we'll + * naturally restore it last in this loop. 
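+ * PCI_COMMAND is skipped inside the loop and rewritten only
+ * after all the other registers have been restored.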
+ */ + for (i = 0; i < 16; ++i) { + if (i * 4 == PCI_COMMAND) + continue; + + if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) { + err = -ENODEV; + mthca_err(mdev, "Couldn't restore HCA bridge reg %x, " + "aborting.\n", i); + goto out; + } + } + + if (pci_write_config_dword(bridge, PCI_COMMAND, + bridge_header[PCI_COMMAND / 4])) { + err = -ENODEV; + mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, " + "aborting.\n"); + goto out; + } + } + + for (i = 0; i < 16; ++i) { + if (i * 4 == PCI_COMMAND) + continue; + + if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) { + err = -ENODEV; + mthca_err(mdev, "Couldn't restore HCA reg %x, " + "aborting.\n", i); + goto out; + } + } + + if (pci_write_config_dword(mdev->pdev, PCI_COMMAND, + hca_header[PCI_COMMAND / 4])) { + err = -ENODEV; + mthca_err(mdev, "Couldn't restore HCA COMMAND, " + "aborting.\n"); + goto out; + } + +out: + if (bridge) + pci_dev_put(bridge); + kfree(bridge_header); + kfree(hca_header); + + return err; +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_srq.c b/branches/MTHCA/hw/mthca/kernel/mthca_srq.c new file mode 100644 index 00000000..c37c62ff --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_srq.c @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $ + */ + +#include "mt_l2w.h" +#include "mthca_dev.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + +enum { + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE +}; + +struct mthca_tavor_srq_context { + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ + __be32 state_pd; + __be32 lkey; + __be32 uar; + __be32 wqe_cnt; + u32 reserved[2]; +}; + +struct mthca_arbel_srq_context { + __be32 state_logsize_srqn; + __be32 lkey; + __be32 db_index; + __be32 logstride_usrpage; + __be64 wqe_base; + __be32 eq_pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved1; + __be16 wqe_counter; + u32 reserved2[3]; +}; + +static void *get_wqe(struct mthca_srq *srq, int n) +{ + if (srq->is_direct) + return srq->queue.direct.buf + (n << srq->wqe_shift); + else + return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); +} + +static void mthca_tavor_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_tavor_srq_context *context) +{ + RtlZeroMemory(context, sizeof *context); + + context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); + context->state_pd = cpu_to_be32(pd->pd_num); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + + if (pd->ibpd.uobject) + context->uar = + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->uar = cpu_to_be32(dev->driver_uar.index); +} + +static void mthca_arbel_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_arbel_srq_context *context) +{ + int logsize; + + RtlZeroMemory(context, sizeof *context); + + logsize = long_log2(srq->max) + srq->wqe_shift; + context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); + context->db_index = cpu_to_be32(srq->db_index); + context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); + if (pd->ibpd.uobject) + context->logstride_usrpage |= + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); + else + context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); + context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); +} + +static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) +{ + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, + srq->is_direct, &srq->mr); + kfree(srq->wrid); +} + +static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, + struct mthca_srq *srq) +{ + struct mthca_data_seg *scatter; + u8 *wqe; + int err; + int i; + + if (pd->ibpd.uobject) + return 0; + + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); + if (!srq->wrid) + return -ENOMEM; + + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, + MTHCA_MAX_DIRECT_SRQ_SIZE, + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); + if (err) { + kfree(srq->wrid); + return err; + } + + /* + * Now initialize the SRQ buffer so that all of the WQEs are + * linked into the list of free WQEs. In addition, set the + * scatter list L_Keys to the sentry value of 0x100. + */ + for (i = 0; i < srq->max; ++i) { + wqe = get_wqe(srq, i); + + *(int *) wqe = i < srq->max - 1 ? 
i + 1 : -1;
+
+ for (scatter = (struct mthca_data_seg *)(wqe + sizeof (struct mthca_next_seg));
+ (void *) scatter < (void*)(wqe + (1 << srq->wqe_shift));
+ ++scatter)
+ scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ }
+
+ return 0;
+}
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ u8 status;
+ int ds;
+ int err;
+
+ /* Sanity check SRQ size before proceeding */
+ if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+ return -EINVAL;
+
+ srq->max = attr->max_wr;
+ srq->max_gs = attr->max_sge;
+ srq->counter = 0;
+
+ if (mthca_is_memfree(dev))
+ srq->max = roundup_pow_of_two(srq->max + 1);
+
+ /* The descriptor stride must be at least 64 bytes and large
+ * enough to hold max_gs scatter entries after the next segment. */
+ ds = max(64UL,
+ roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+ srq->max_gs * sizeof (struct mthca_data_seg)));
+ srq->wqe_shift = long_log2(ds);
+
+ srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+ if (srq->srqn == -1)
+ return -ENOMEM;
+
+ if (mthca_is_memfree(dev)) {
+ err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+ if (err)
+ goto err_out;
+
+ if (!pd->ibpd.uobject) {
+ srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+ srq->srqn, &srq->db);
+ if (srq->db_index < 0) {
+ err = -ENOMEM;
+ goto err_out_icm;
+ }
+ }
+ }
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_out_db;
+ }
+
+ err = mthca_alloc_srq_buf(dev, pd, srq);
+ if (err)
+ goto err_out_mailbox;
+
+ spin_lock_init(&srq->lock);
+ atomic_set(&srq->refcount, 1);
+ init_waitqueue_head(&srq->wait);
+
+ if (mthca_is_memfree(dev))
+ mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+ else
+ mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+ err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+ if (err) {
+ mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
+ goto err_out_free_buf;
+ }
+ if (status) {
+ mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
+ status);
+ err = -EINVAL;
+ goto err_out_free_buf;
+ }
+
+ spin_lock_irq(&dev->srq_table.lock);
+ err = mthca_array_set(&dev->srq_table.srq,
+ srq->srqn & (dev->limits.num_srqs - 1),
+ srq);
+ spin_unlock_irq(&dev->srq_table.lock);
+ if (err)
+ goto err_out_free_srq;
+
+ mthca_free_mailbox(dev, mailbox);
+
+ srq->first_free = 0;
+ srq->last_free = srq->max - 1;
+ srq->last = get_wqe(srq, srq->max - 1);
+
+ return 0;
+
+err_out_free_srq:
+ {
+ int err2 = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ if (err2)
+ mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err2);
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+ }
+
+err_out_free_buf:
+ if (!pd->ibpd.uobject)
+ mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+ mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+ if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+ mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+ mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+ mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+ return err;
+}
+
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ int err;
+ u8 status;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
+ return;
+ }
+
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ if (err)
+ mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+
spin_lock_irq(&dev->srq_table.lock); + mthca_array_clear(&dev->srq_table.srq, + srq->srqn & (dev->limits.num_srqs - 1)); + spin_unlock_irq(&dev->srq_table.lock); + + atomic_dec(&srq->refcount); + wait_event(&srq->wait, !atomic_read(&srq->refcount)); + + if (!srq->ibsrq.uobject) { + mthca_free_srq_buf(dev, srq); + if (mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); + } + + mthca_table_put(dev, dev->srq_table.table, srq->srqn); + mthca_free(&dev->srq_table.alloc, srq->srqn); + mthca_free_mailbox(dev, mailbox); +} + +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type) +{ + struct mthca_srq *srq; + struct ib_event event; + + spin_lock(&dev->srq_table.lock); + srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + spin_unlock(&dev->srq_table.lock); + + if (!srq) { + mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn); + return; + } + + if (!srq->ibsrq.event_handler) + goto out; + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.srq = &srq->ibsrq; + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); + +out: + if (atomic_dec_and_test(&srq->refcount)) + wake_up(&srq->wait); +} + +/* + * This function must be called with IRQs disabled. + */ +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) +{ + int ind; + + ind = wqe_addr >> srq->wqe_shift; + + spin_lock(&srq->lock); + + if (likely(srq->first_free >= 0)) + *(int *) get_wqe(srq, srq->last_free) = ind; + else + srq->first_free = ind; + + *(int *) get_wqe(srq, ind) = -1; + srq->last_free = ind; + + spin_unlock(&srq->lock); +} + +//TODO: is this code correct at all ? +int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + int err = 0; + int first_ind; + int ind; + int next_ind; + int nreq; + int i; + u8 *wqe; + u8 *prev_wqe; + + spin_lock_irqsave(&srq->lock); + + first_ind = srq->first_free; + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + ind = srq->first_free; + + if (ind < 0) { + mthca_err(dev, "SRQ %06x full\n", srq->srqn); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + wqe = get_wqe(srq, ind); + next_ind = *(int *) wqe; + prev_wqe = srq->last; + srq->last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely(wr->num_sge > srq->max_gs)) { + err = -EINVAL; + *bad_wr = wr; + srq->last = prev_wqe; + goto out; + } + + for (i = 0; i < wr->num_sge; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cpu_to_be32(wr->sg_list[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cpu_to_be32(wr->sg_list[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cpu_to_be64(wr->sg_list[i].addr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cpu_to_be32((ind << srq->wqe_shift) | 1); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cpu_to_be32(MTHCA_NEXT_DBD); + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + } + +out: + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cpu_to_be32(first_ind << 
srq->wqe_shift);
+ doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+ }
+
+ spin_unlock_irqrestore(&srq->lock);
+ return err;
+}
+
+//TODO: is this code correct at all?
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ int err = 0;
+ int ind;
+ int next_ind;
+ int nreq;
+ int i;
+ u8 *wqe;
+
+ spin_lock_irqsave(&srq->lock);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ind = srq->first_free;
+
+ if (ind < 0) {
+ mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_wqe(srq, ind);
+ next_ind = *(int *) wqe;
+
+ ((struct mthca_next_seg *) wqe)->nda_op =
+ cpu_to_be32((next_ind << srq->wqe_shift) | 1);
+ ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+ /* flags field will always remain 0 */
+
+ wqe += sizeof (struct mthca_next_seg);
+
+ if (unlikely(wr->num_sge > srq->max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mthca_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mthca_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mthca_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+ wqe += sizeof (struct mthca_data_seg);
+ }
+
+ if (i < srq->max_gs) {
+ ((struct mthca_data_seg *) wqe)->byte_count = 0;
+ ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ ((struct mthca_data_seg *) wqe)->addr = 0;
+ }
+
+ srq->wrid[ind] = wr->wr_id;
+ srq->first_free = next_ind;
+ }
+
+out:
+ if (likely(nreq)) {
+ srq->counter += (u16)nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * we write doorbell record.
+ */
+ wmb();
+ *srq->db = cpu_to_be32(srq->counter);
+ }
+
+ spin_unlock_irqrestore(&srq->lock);
+ return err;
+}
+
+int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+{
+ int err;
+
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return 0;
+
+ spin_lock_init(&dev->srq_table.lock);
+
+ err = mthca_alloc_init(&dev->srq_table.alloc,
+ dev->limits.num_srqs,
+ dev->limits.num_srqs - 1,
+ dev->limits.reserved_srqs);
+ if (err)
+ return err;
+
+ err = mthca_array_init(&dev->srq_table.srq,
+ dev->limits.num_srqs);
+ if (err)
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+ return err;
+}
+
+void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return;
+
+ mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_uar.c b/branches/MTHCA/hw/mthca/kernel/mthca_uar.c
new file mode 100644
index 00000000..a68644e1
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/kernel/mthca_uar.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mthca_uar.c 2643 2005-06-16 22:48:17Z roland $ + */ + +#include "mthca_dev.h" +#include "mthca_memfree.h" + +int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar) +{ + uar->index = mthca_alloc(&dev->uar_table.alloc); + if (uar->index == -1) + return -ENOMEM; + + uar->pfn = (unsigned long)(pci_resource_start(dev, HCA_BAR_TYPE_UAR) >> PAGE_SHIFT) + uar->index; + + return 0; +} + +void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar) +{ + mthca_free(&dev->uar_table.alloc, uar->index); +} + +int mthca_init_uar_table(struct mthca_dev *dev) +{ + int ret; + + ret = mthca_alloc_init(&dev->uar_table.alloc, + dev->limits.num_uars, + dev->limits.num_uars - 1, + dev->limits.reserved_uars); + if (ret) + return ret; + + ret = mthca_init_db_tab(dev); + if (ret) + mthca_alloc_cleanup(&dev->uar_table.alloc); + + return ret; +} + +void mthca_cleanup_uar_table(struct mthca_dev *dev) +{ + mthca_cleanup_db_tab(dev); + + /* XXX check if any UARs are still allocated? */ + mthca_alloc_cleanup(&dev->uar_table.alloc); +} diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_user.h b/branches/MTHCA/hw/mthca/kernel/mthca_user.h new file mode 100644 index 00000000..dd46c5c5 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_user.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef MTHCA_USER_H +#define MTHCA_USER_H + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in u64 + * instead. + */ + +struct mthca_alloc_ucontext_resp { + u32 qp_tab_size; + u32 uarc_size; +}; + +struct mthca_alloc_pd_resp { + u32 pdn; + u32 reserved; +}; + +struct mthca_create_cq { + u32 lkey; + u32 pdn; + u64 arm_db_page; + u64 set_db_page; + u32 arm_db_index; + u32 set_db_index; +}; + +struct mthca_create_cq_resp { + u32 cqn; + u32 reserved; +}; + +struct mthca_create_srq { + u32 lkey; + u32 db_index; + u64 db_page; +}; + +struct mthca_create_srq_resp { + u32 srqn; + u32 reserved; +}; + +struct mthca_create_qp { + u32 lkey; + u32 reserved; + u64 sq_db_page; + u64 rq_db_page; + u32 sq_db_index; + u32 rq_db_index; +}; + +#endif /* MTHCA_USER_H */ diff --git a/branches/MTHCA/hw/mthca/kernel/mthca_wqe.h b/branches/MTHCA/hw/mthca/kernel/mthca_wqe.h new file mode 100644 index 00000000..5a68c665 --- /dev/null +++ b/branches/MTHCA/hw/mthca/kernel/mthca_wqe.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_WQE_H
+#define MTHCA_WQE_H
+
+enum {
+ MTHCA_NEXT_DBD = 1 << 7,
+ MTHCA_NEXT_FENCE = 1 << 6,
+ MTHCA_NEXT_CQ_UPDATE = 1 << 3,
+ MTHCA_NEXT_EVENT_GEN = 1 << 2,
+ MTHCA_NEXT_SOLICIT = 1 << 1,
+
+ MTHCA_MLX_VL15 = 1 << 17,
+ MTHCA_MLX_SLR = 1 << 16
+};
+
+enum {
+ MTHCA_INVAL_LKEY = 0x100
+};
+
+struct mthca_next_seg {
+ __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */
+ __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
+ __be32 flags; /* [3] CQ [2] Event [1] Solicit */
+ __be32 imm; /* immediate data */
+};
+
+struct mthca_tavor_ud_seg {
+ u32 reserved1;
+ __be32 lkey;
+ __be64 av_addr;
+ u32 reserved2[4];
+ __be32 dqpn;
+ __be32 qkey;
+ u32 reserved3[2];
+};
+
+struct mthca_arbel_ud_seg {
+ __be32 av[8];
+ __be32 dqpn;
+ __be32 qkey;
+ u32 reserved[2];
+};
+
+struct mthca_bind_seg {
+ __be32 flags; /* [31] Atomic [30] rem write [29] rem read */
+ u32 reserved;
+ __be32 new_rkey;
+ __be32 lkey;
+ __be64 addr;
+ __be64 length;
+};
+
+struct mthca_raddr_seg {
+ __be64 raddr;
+ __be32 rkey;
+ u32 reserved;
+};
+
+struct mthca_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+};
+
+struct mthca_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+};
+
+struct mthca_mlx_seg {
+ __be32 nda_op;
+ __be32 nds;
+ __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate
+ [11:8] SL [3] C [2] E */
+ __be16 rlid;
+ __be16 vcrc;
+};
+
+#endif /* MTHCA_WQE_H */
diff --git a/branches/MTHCA/hw/mthca/readme.txt b/branches/MTHCA/hw/mthca/readme.txt
new file mode 100644
index 00000000..cd615985
--- /dev/null
+++ b/branches/MTHCA/hw/mthca/readme.txt
@@ -0,0 +1,48 @@
+16 Oct 2005
+
+1. Status quo
+ The low-level driver fails on start-up after creating a CQ.
+ Some of the control path functions are not yet implemented.
+ No data path functions are supported.
+ Work from userland is not supported at all.
+ The source code is in a very preliminary state (see more explanations below).
+
+2. Main intentions of work
+ - unite the shim with the low-level driver (in the current stack, the shim is a filter driver over the low-level one);
+ - develop the low-level driver on the basis of the Linux Gen2 low-level driver, keeping it as similar as possible;
+ - try (temporarily) to preserve the Linux look of the driver (to facilitate bug fixes for the time being);
+ - try not to make changes in the IBAL layer;
+ - try to make the shim as thin as possible;
+
+3. Implementation decisions
+ - all header files are placed under inc\kernel\mthca
+ -- user header files will go to inc\user\mthca
+ - all driver files are placed under hw\mthca\kernel
+ -- userland support files will go to hw\mthca\user
+ - naming convention:
+ hca_xxx files - the shim (sometimes also called "driver");
+ mthca_xxx files - Gen2 ported files (vpd, verb provider, HCAVP);
+ mt_xxx files - auxiliary files; some of them were taken from the Gen2 core;
+
+4. The source code state
+ a) To facilitate my work during the coding/porting stage, I usually do not remove the old code, but put it under undefined preprocessor symbols.
+	LINUX_TO_BE_REMOVED - code in HCAVP to be removed as irrelevant to Windows;
+	LINUX_TO_BE_CHANGED - code in HCAVP to be ported to Windows;
+	WIN_TO_BE_REMOVED - code in the driver to be removed (usually because it supports the old HCAVP);
+	WIN_TO_BE_CHANGED - code in the driver to be adjusted to the new HCAVP;
+	WIN_USER_SUPPORT - code to be added/changed for userland support
+
+	b) In some cases I put other marks in the form of comments:
+	//TODO - something not yet done or that looks error-prone; needs to be revised before release;
+	//NB - a kind of warning; doesn't look dangerous to me;
+	/* leo - has various meanings; really I'm going to replace it with the two marks above;
+	??? - usually comes to reinforce a TODO; is to be replaced by TODO only;
+
+5. Technical details
+	The Gen2 driver was taken on 08/11 (SVN revision 3056)
+	The WinIb stack was branched on 08/10 (from trunk SVN revision 260)
+
+
+
+
+
diff --git a/branches/MTHCA/inc/kernel/mthca/ib_at.h b/branches/MTHCA/inc/kernel/mthca/ib_at.h
new file mode 100644
index 00000000..fd275409
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/ib_at.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2004,2005 Voltaire Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ *
+ * $Id: ib_at.h 3025 2005-08-08 20:38:20Z halr $
+ */
+
+#if !defined( IB_AT_H )
+#define IB_AT_H
+
+#include <ib_verbs.h>
+#include <ib_sa.h>
+
+enum ib_at_multipathing_type {
+	IB_AT_PATH_SAME_PORT = 0,
+	IB_AT_PATH_SAME_HCA = 1, /* but different ports if applicable */
+	IB_AT_PATH_SAME_SYSTEM = 2, /* but different ports if applicable */
+	IB_AT_PATH_INDEPENDENT_HCA = 3,
+	IB_AT_PATH_SRC_ROUTE = 4, /* application controlled multipathing */
+};
+
+enum ib_at_route_flags {
+	IB_AT_ROUTE_USE_DEFAULTS = 0,
+	IB_AT_ROUTE_FORCE_ATS = 1,
+	IB_AT_ROUTE_FORCE_ARP = 2,
+	IB_AT_ROUTE_FORCE_RESOLVE = 4,
+};
+
+struct ib_at_path_attr {
+	u16 qos_tag;
+	__be16 pkey;
+	u8 multi_path_type;
+};
+
+struct ib_at_ib_route {
+	union ib_gid sgid;
+	union ib_gid dgid;
+	struct ib_device *out_dev;
+	int out_port;
+	struct ib_at_path_attr attr;
+};
+
+enum ib_at_op_status {
+	IB_AT_STATUS_INVALID = 0,
+	IB_AT_STATUS_PENDING = 1,
+	IB_AT_STATUS_COMPLETED = 2,
+	IB_AT_STATUS_ERROR = 3,
+	IB_AT_STATUS_CANCELED = 4,
+};
+
+/*
+ * ib_at_completion structure - callback function parameters structure
+ * @completion: completion callback function
+ * @context: user-defined context pointer
+ * @req_id: asynchronous request ID - optional, out
+ *
+ * The asynchronous resolution function behavior is as follows:
+ * If the resolve operation can be fulfilled immediately, then the output
+ * structures are set and the number of filled structures is returned.
+ *
+ * If the resolve operation cannot be fulfilled immediately and
+ * an ib_at_completion structure is not provided,
+ * then the function immediately returns -EWOULDBLOCK.
+ *
+ * If an ib_at_completion structure is provided and an asynchronous
+ * operation is started, the function immediately returns zero,
+ * and the request ID field (req_id) is set if the pointer is
+ * non-NULL. This request ID may be used to cancel the operation,
+ * or to poll its status.
+ *
+ * When an asynchronous operation completes (successfully or not),
+ * the callback function is called, passing the request ID,
+ * the supplied user context and the number of output structures.
+ * If the asynchronous operation did not complete, a negative
+ * error code is returned as the 'rec_num'.
+ * Valid error codes are:
+ *	-EINTR: operation is canceled
+ *	-EIO: request send failed
+ *	-ETIMEDOUT: operation timed out
+ *
+ * A returned value of zero records means that the resolution process
+ * completed, but the given address could not be resolved at this time.
+ */
+struct ib_at_completion {
+	void (*fn)(u64 req_id, void *context, int rec_num);
+	void *context;
+	u64 req_id;
+};
+
+/**
+ * ib_at_route_by_ip - asynchronously resolve ip address to ib route
+ * @dst_ip: destination ip
+ * @src_ip: source ip - optional
+ * @tos: ip type of service
+ * @flags: ib_at_route_flags
+ * @ib_route: out structure
+ * @async_comp: asynchronous callback structure - optional
+ *
+ * Resolve the specified dst_ip to a &struct ib_at_ib_route structure.
+ * src_ip can be provided to force a specific output interface.
+ * flags can be used to select the resolving method; currently IB-ARP or ATS.
+ *
+ * See ib_at_completion structure documentation for asynchronous
+ * operation details.
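+ *
+ * A minimal usage sketch; my_route_done, my_ctx and dst_ip are
+ * placeholder names, and error handling is elided:
+ *
+ *	static void my_route_done(u64 req_id, void *context, int rec_num)
+ *	{
+ *		// rec_num == 1: the route passed to ib_at_route_by_ip()
+ *		// is now valid; 0: unresolved; < 0: an error code.
+ *	}
+ *
+ *	struct ib_at_ib_route route;
+ *	struct ib_at_completion comp = { my_route_done, my_ctx, 0 };
+ *	int n = ib_at_route_by_ip(dst_ip, 0, 0, IB_AT_ROUTE_USE_DEFAULTS,
+ *				  &route, &comp);
+ *	// n == 1 here means the route was resolved synchronously;
+ *	// n == 0 means my_route_done() will be called later.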
+ */
+int ib_at_route_by_ip(u32 dst_ip, u32 src_ip, int tos, u16 flags,
+		      struct ib_at_ib_route *ib_route,
+		      struct ib_at_completion *async_comp);
+
+/**
+ * ib_at_paths_by_route - asynchronously resolve ib route to ib path records
+ * @ib_route: ib route to resolve
+ * @mpath_type: ib_at_multipathing_type
+ * @path_arr: SA path record array - out
+ * @npath: maximal number of paths to return
+ * @async_comp: asynchronous callback structure - optional
+ *
+ * Resolve the specified ib_route to an SA path record array.
+ * The number of returned paths will not exceed npath.
+ * The multipathing type may be used to obtain redundant paths for APM,
+ * other failover schemes, bandwidth aggregation or source based routing.
+ * Note that a multipathing request is meaningless unless npath is greater than 1.
+ *
+ * The returned ib_route structure includes the recommended pkey and qos_tag for
+ * this route.
+ *
+ * See ib_at_completion structure documentation for asynchronous operation
+ * details.
+ */
+int ib_at_paths_by_route(struct ib_at_ib_route *ib_route, u32 mpath_type,
+			 struct ib_sa_path_rec *path_arr, int npath,
+			 struct ib_at_completion *async_comp);
+
+/**
+ * ib_at_ips_by_gid - asynchronously resolve GID to IP addresses
+ * @gid: GID to resolve
+ * @dst_ips: array of IPs, out
+ * @nips: number of IP entries in the dst_ips array
+ * @async_comp: asynchronous callback structure - optional
+ *
+ * Resolve the gid to IP addresses, but not more than nips.
+ * This function relies on the IB-ATS mechanism.
+ *
+ * See ib_at_completion structure documentation for asynchronous
+ * operation details.
+ */
+int ib_at_ips_by_gid(union ib_gid *gid, u32 *dst_ips, int nips,
+		     struct ib_at_completion *async_comp);
+
+/**
+ * ib_at_ips_by_subnet - return local IP addresses by IP subnet
+ * @network: network to resolve - optional
+ * @netmask: subnet net mask - optional
+ * @dst_ips: array of IPs, out
+ * @nips: number of IP entries in the dst_ips array
+ *
+ * Return local IP addresses matching the network and netmask,
+ * but not more than nips.
+ *
+ * Note that a network and netmask of 0x0 or 0xffffffff return all local IPs.
+ */
+int ib_at_ips_by_subnet(u32 network, u32 netmask, u32 *dst_ips, int nips);
+
+/**
+ * ib_at_invalidate_paths - invalidate possibly cached paths keyed by ib_route
+ * @ib_route: paths key - optional
+ *
+ * Returns the number of invalidated paths.
+ * If ib_route is NULL, then the entire cache will be flushed.
+ */
+int ib_at_invalidate_paths(struct ib_at_ib_route *ib_route);
+
+/**
+ * ib_at_cancel - cancel a possibly active asynchronous operation
+ * @req_id: asynchronous request ID
+ *
+ * Returns 0 if canceled, -1 if the cancel failed (e.g. bad ID)
+ */
+int ib_at_cancel(u64 req_id);
+
+/**
+ * ib_at_status - poll an asynchronous operation's status
+ * @req_id: asynchronous request ID
+ *
+ * Returns a non-negative ib_at_op_status value,
+ * or -EINVAL if the request ID is invalid.
+ */
+int ib_at_status(u64 req_id);
+
+#endif /* IB_AT_H */
diff --git a/branches/MTHCA/inc/kernel/mthca/ib_cache.h b/branches/MTHCA/inc/kernel/mthca/ib_cache.h
new file mode 100644
index 00000000..be3ca4fc
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/ib_cache.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_cache.h 2730 2005-06-28 16:43:03Z sean.hefty $ + */ + +#ifndef _IB_CACHE_H +#define _IB_CACHE_H + +#include + +/** + * ib_get_cached_gid - Returns a cached GID table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @index: The index into the cached GID table to query. + * @gid: The GID value found at the specified index. + * + * ib_get_cached_gid() fetches the specified GID table entry stored in + * the local software cache. + */ +int ib_get_cached_gid(struct ib_device *device, + u8 port_num, + int index, + union ib_gid *gid); + +/** + * ib_find_cached_gid - Returns the port number and GID table index where + * a specified GID value occurs. + * @device: The device to query. + * @gid: The GID value to search for. + * @port_num: The port number of the device where the GID value was found. + * @index: The index into the cached GID table where the GID was found. This + * parameter may be NULL. + * + * ib_find_cached_gid() searches for the specified GID value in + * the local software cache. + */ +int ib_find_cached_gid(struct ib_device *device, + union ib_gid *gid, + u8 *port_num, + u16 *index); + +/** + * ib_get_cached_pkey - Returns a cached PKey table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @index: The index into the cached PKey table to query. + * @pkey: The PKey value found at the specified index. + * + * ib_get_cached_pkey() fetches the specified PKey table entry stored in + * the local software cache. + */ +int ib_get_cached_pkey(struct ib_device *device_handle, + u8 port_num, + int index, + u16 *pkey); + +/** + * ib_find_cached_pkey - Returns the PKey table index where a specified + * PKey value occurs. + * @device: The device to query. + * @port_num: The port number of the device to search for the PKey. + * @pkey: The PKey value to search for. + * @index: The index into the cached PKey table where the PKey was found. + * + * ib_find_cached_pkey() searches the specified PKey table in + * the local software cache. 
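+ *
+ * A minimal sketch, assuming the conventional 0-on-success return and
+ * that the full-membership default PKey 0xffff is present; 'device',
+ * port 1 and qp_attr are placeholders:
+ *
+ *	u16 index;
+ *	if (!ib_find_cached_pkey(device, 1, 0xffff, &index))
+ *		qp_attr.pkey_index = index;	// use the cached table slot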
+ */ +int ib_find_cached_pkey(struct ib_device *device, + u8 port_num, + u16 pkey, + u16 *index); + + +int ib_cache_setup(void); +void ib_cache_cleanup(void); + +#endif /* _IB_CACHE_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_cm.h b/branches/MTHCA/inc/kernel/mthca/ib_cm.h new file mode 100644 index 00000000..b7db2a9f --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_cm.h @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: ib_cm.h 2930 2005-07-28 19:22:44Z sean.hefty $ + */ +#if !defined(IB_CM_H) +#define IB_CM_H + +#include +#include + +enum ib_cm_state { + IB_CM_IDLE, + IB_CM_LISTEN, + IB_CM_REQ_SENT, + IB_CM_REQ_RCVD, + IB_CM_MRA_REQ_SENT, + IB_CM_MRA_REQ_RCVD, + IB_CM_REP_SENT, + IB_CM_REP_RCVD, + IB_CM_MRA_REP_SENT, + IB_CM_MRA_REP_RCVD, + IB_CM_ESTABLISHED, + IB_CM_DREQ_SENT, + IB_CM_DREQ_RCVD, + IB_CM_TIMEWAIT, + IB_CM_SIDR_REQ_SENT, + IB_CM_SIDR_REQ_RCVD +}; + +enum ib_cm_lap_state { + IB_CM_LAP_IDLE, + IB_CM_LAP_SENT, + IB_CM_LAP_RCVD, + IB_CM_MRA_LAP_SENT, + IB_CM_MRA_LAP_RCVD, +}; + +enum ib_cm_event_type { + IB_CM_REQ_ERROR, + IB_CM_REQ_RECEIVED, + IB_CM_REP_ERROR, + IB_CM_REP_RECEIVED, + IB_CM_RTU_RECEIVED, + IB_CM_USER_ESTABLISHED, + IB_CM_DREQ_ERROR, + IB_CM_DREQ_RECEIVED, + IB_CM_DREP_RECEIVED, + IB_CM_TIMEWAIT_EXIT, + IB_CM_MRA_RECEIVED, + IB_CM_REJ_RECEIVED, + IB_CM_LAP_ERROR, + IB_CM_LAP_RECEIVED, + IB_CM_APR_RECEIVED, + IB_CM_SIDR_REQ_ERROR, + IB_CM_SIDR_REQ_RECEIVED, + IB_CM_SIDR_REP_RECEIVED +}; + +enum ib_cm_data_size { + IB_CM_REQ_PRIVATE_DATA_SIZE = 92, + IB_CM_MRA_PRIVATE_DATA_SIZE = 222, + IB_CM_REJ_PRIVATE_DATA_SIZE = 148, + IB_CM_REP_PRIVATE_DATA_SIZE = 196, + IB_CM_RTU_PRIVATE_DATA_SIZE = 224, + IB_CM_DREQ_PRIVATE_DATA_SIZE = 220, + IB_CM_DREP_PRIVATE_DATA_SIZE = 224, + IB_CM_REJ_ARI_LENGTH = 72, + IB_CM_LAP_PRIVATE_DATA_SIZE = 168, + IB_CM_APR_PRIVATE_DATA_SIZE = 148, + IB_CM_APR_INFO_LENGTH = 72, + IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, + IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, + IB_CM_SIDR_REP_INFO_LENGTH = 72 +}; + +struct ib_cm_id; + +struct ib_cm_req_event_param { + struct ib_cm_id *listen_id; + struct ib_device *device; + u8 port; + + struct ib_sa_path_rec *primary_path; + struct ib_sa_path_rec *alternate_path; + + __be64 remote_ca_guid; + u32 remote_qkey; + u32 remote_qpn; + enum ib_qp_type_t qp_type; + + u32 starting_psn; + u8 responder_resources; + u8 initiator_depth; + unsigned int local_cm_response_timeout:5; + unsigned int flow_control:1; + unsigned int remote_cm_response_timeout:5; + unsigned int retry_count:3; + unsigned int rnr_retry_count:3; + unsigned int srq:1; +}; + +struct ib_cm_rep_event_param { + __be64 remote_ca_guid; + u32 remote_qkey; + u32 remote_qpn; + u32 starting_psn; + u8 responder_resources; + u8 initiator_depth; + unsigned int target_ack_delay:5; + unsigned int failover_accepted:2; + unsigned int flow_control:1; + unsigned int rnr_retry_count:3; + unsigned int srq:1; +}; + +enum ib_cm_rej_reason { + IB_CM_REJ_NO_QP = 1, + IB_CM_REJ_NO_EEC = 2, + IB_CM_REJ_NO_RESOURCES = 3, + IB_CM_REJ_TIMEOUT = 4, + IB_CM_REJ_UNSUPPORTED = 5, + IB_CM_REJ_INVALID_COMM_ID = 6, + IB_CM_REJ_INVALID_COMM_INSTANCE = 7, + IB_CM_REJ_INVALID_SERVICE_ID = 8, + IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9, + IB_CM_REJ_STALE_CONN = 10, + IB_CM_REJ_RDC_NOT_EXIST = 11, + IB_CM_REJ_INVALID_GID = 12, + IB_CM_REJ_INVALID_LID = 13, + IB_CM_REJ_INVALID_SL = 14, + IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15, + IB_CM_REJ_INVALID_HOP_LIMIT = 16, + IB_CM_REJ_INVALID_PACKET_RATE = 17, + IB_CM_REJ_INVALID_ALT_GID = 18, + IB_CM_REJ_INVALID_ALT_LID = 19, + IB_CM_REJ_INVALID_ALT_SL = 20, + IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21, + IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22, + IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23, + IB_CM_REJ_PORT_CM_REDIRECT = 24, + IB_CM_REJ_PORT_REDIRECT = 25, + IB_CM_REJ_INVALID_MTU = 26, + IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27, + IB_CM_REJ_CONSUMER_DEFINED = 28, + IB_CM_REJ_INVALID_RNR_RETRY = 29, + IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30, + 
IB_CM_REJ_INVALID_CLASS_VERSION = 31, + IB_CM_REJ_INVALID_FLOW_LABEL = 32, + IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33 +}; + +struct ib_cm_rej_event_param { + enum ib_cm_rej_reason reason; + void *ari; + u8 ari_length; +}; + +struct ib_cm_mra_event_param { + u8 service_timeout; +}; + +struct ib_cm_lap_event_param { + struct ib_sa_path_rec *alternate_path; +}; + +enum ib_cm_apr_status { + IB_CM_APR_SUCCESS, + IB_CM_APR_INVALID_COMM_ID, + IB_CM_APR_UNSUPPORTED, + IB_CM_APR_REJECT, + IB_CM_APR_REDIRECT, + IB_CM_APR_IS_CURRENT, + IB_CM_APR_INVALID_QPN_EECN, + IB_CM_APR_INVALID_LID, + IB_CM_APR_INVALID_GID, + IB_CM_APR_INVALID_FLOW_LABEL, + IB_CM_APR_INVALID_TCLASS, + IB_CM_APR_INVALID_HOP_LIMIT, + IB_CM_APR_INVALID_PACKET_RATE, + IB_CM_APR_INVALID_SL +}; + +struct ib_cm_apr_event_param { + enum ib_cm_apr_status ap_status; + void *apr_info; + u8 info_len; +}; + +struct ib_cm_sidr_req_event_param { + struct ib_cm_id *listen_id; + struct ib_device *device; + u8 port; + u16 pkey; +}; + +enum ib_cm_sidr_status { + IB_SIDR_SUCCESS, + IB_SIDR_UNSUPPORTED, + IB_SIDR_REJECT, + IB_SIDR_NO_QP, + IB_SIDR_REDIRECT, + IB_SIDR_UNSUPPORTED_VERSION +}; + +struct ib_cm_sidr_rep_event_param { + enum ib_cm_sidr_status status; + u32 qkey; + u32 qpn; + void *info; + u8 info_len; + +}; + +struct ib_cm_event { + enum ib_cm_event_type event; + union { + struct ib_cm_req_event_param req_rcvd; + struct ib_cm_rep_event_param rep_rcvd; + /* No data for RTU received events. */ + struct ib_cm_rej_event_param rej_rcvd; + struct ib_cm_mra_event_param mra_rcvd; + struct ib_cm_lap_event_param lap_rcvd; + struct ib_cm_apr_event_param apr_rcvd; + /* No data for DREQ/DREP received events. */ + struct ib_cm_sidr_req_event_param sidr_req_rcvd; + struct ib_cm_sidr_rep_event_param sidr_rep_rcvd; + enum ib_wc_status send_status; + } param; + + void *private_data; +}; + +/** + * ib_cm_handler - User-defined callback to process communication events. + * @cm_id: Communication identifier associated with the reported event. + * @event: Information about the communication event. + * + * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events + * generated as a result of listen requests result in the allocation of a + * new @cm_id. The new @cm_id is returned to the user through this callback. + * Clients are responsible for destroying the new @cm_id. For peer-to-peer + * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds + * to a user's existing communication identifier. + * + * Users may not call ib_destroy_cm_id while in the context of this callback; + * however, returning a non-zero value instructs the communication manager to + * destroy the @cm_id after the callback completes. + */ +typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id, + struct ib_cm_event *event); + +struct ib_cm_id { + ib_cm_handler cm_handler; + void *context; + __be64 service_id; + __be64 service_mask; + enum ib_cm_state state; /* internal CM/debug use */ + enum ib_cm_lap_state lap_state; /* internal CM/debug use */ + __be32 local_id; + __be32 remote_id; +}; + +/** + * ib_create_cm_id - Allocate a communication identifier. + * @cm_handler: Callback invoked to notify the user of CM events. + * @context: User specified context associated with the communication + * identifier. + * + * Communication identifiers are used to track connection states, service + * ID resolution requests, and listen requests. 
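+ *
+ * A minimal sketch; the handler body and my_ctx are placeholders, and
+ * error checking of the returned pointer is elided:
+ *
+ *	static int my_cm_handler(struct ib_cm_id *cm_id,
+ *				 struct ib_cm_event *event)
+ *	{
+ *		return 0;	// keep cm_id; non-zero asks the CM to destroy it
+ *	}
+ *
+ *	struct ib_cm_id *cm_id = ib_create_cm_id(my_cm_handler, my_ctx);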
+ */
+struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
+				 void *context);
+
+/**
+ * ib_destroy_cm_id - Destroy a connection identifier.
+ * @cm_id: Connection identifier to destroy.
+ *
+ * This call blocks until the connection identifier is destroyed.
+ */
+void ib_destroy_cm_id(struct ib_cm_id *cm_id);
+
+#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
+#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
+
+/**
+ * ib_cm_listen - Initiates listening on the specified service ID for
+ * connection and service ID resolution requests.
+ * @cm_id: Connection identifier associated with the listen request.
+ * @service_id: Service identifier matched against incoming connection
+ * and service ID resolution requests. The service ID should be specified
+ * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
+ * assign a service ID to the caller.
+ * @service_mask: Mask applied to service ID used to listen across a
+ * range of service IDs. If set to 0, the service ID is matched
+ * exactly. This parameter is ignored if %service_id is set to
+ * IB_CM_ASSIGN_SERVICE_ID.
+ */
+int ib_cm_listen(struct ib_cm_id *cm_id,
+		 __be64 service_id,
+		 __be64 service_mask);
+
+struct ib_cm_req_param {
+	struct ib_sa_path_rec *primary_path;
+	struct ib_sa_path_rec *alternate_path;
+	__be64 service_id;
+	u32 qp_num;
+	enum ib_qp_type_t qp_type;
+	u32 starting_psn;
+	const void *private_data;
+	u8 private_data_len;
+	u8 peer_to_peer;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 remote_cm_response_timeout;
+	u8 flow_control;
+	u8 local_cm_response_timeout;
+	u8 retry_count;
+	u8 rnr_retry_count;
+	u8 max_cm_retries;
+	u8 srq;
+};
+
+/**
+ * ib_send_cm_req - Sends a connection request to the remote node.
+ * @cm_id: Connection identifier that will be associated with the
+ * connection request.
+ * @param: Connection request information needed to establish the
+ * connection.
+ */
+int ib_send_cm_req(struct ib_cm_id *cm_id,
+		   struct ib_cm_req_param *param);
+
+struct ib_cm_rep_param {
+	u32 qp_num;
+	u32 starting_psn;
+	const void *private_data;
+	u8 private_data_len;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 target_ack_delay;
+	u8 failover_accepted;
+	u8 flow_control;
+	u8 rnr_retry_count;
+	u8 srq;
+};
+
+/**
+ * ib_send_cm_rep - Sends a connection reply in response to a connection
+ * request.
+ * @cm_id: Connection identifier that will be associated with the
+ * connection request.
+ * @param: Connection reply information needed to establish the
+ * connection.
+ */
+int ib_send_cm_rep(struct ib_cm_id *cm_id,
+		   struct ib_cm_rep_param *param);
+
+/**
+ * ib_send_cm_rtu - Sends a connection ready to use message in response
+ * to a connection reply message.
+ * @cm_id: Connection identifier associated with the connection request.
+ * @private_data: Optional user-defined private data sent with the
+ * ready to use message.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ */
+int ib_send_cm_rtu(struct ib_cm_id *cm_id,
+		   const void *private_data,
+		   u8 private_data_len);
+
+/**
+ * ib_send_cm_dreq - Sends a disconnection request for an existing
+ * connection.
+ * @cm_id: Connection identifier associated with the connection being
+ * released.
+ * @private_data: Optional user-defined private data sent with the
+ * disconnection request message.
+ * @private_data_len: Size of the private data buffer, in bytes.
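+ *
+ * A typical teardown sketch; status checking is elided:
+ *
+ *	ib_send_cm_dreq(cm_id, NULL, 0);	// no private data
+ *	// the remote DREP arrives at the cm_handler as an
+ *	// IB_CM_DREP_RECEIVED event, after which the connection
+ *	// enters the timewait state (see ib_send_cm_drep).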
+ */
+int ib_send_cm_dreq(struct ib_cm_id *cm_id,
+		    const void *private_data,
+		    u8 private_data_len);
+
+/**
+ * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
+ * @cm_id: Connection identifier associated with the connection being
+ * released.
+ * @private_data: Optional user-defined private data sent with the
+ * disconnection reply message.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ *
+ * If the cm_id is in the correct state, the CM will transition the connection
+ * to the timewait state, even if an error occurs sending the DREP message.
+ */
+int ib_send_cm_drep(struct ib_cm_id *cm_id,
+		    const void *private_data,
+		    u8 private_data_len);
+
+/**
+ * ib_cm_establish - Forces a connection state to established.
+ * @cm_id: Connection identifier to transition to established.
+ *
+ * This routine should be invoked by users who receive messages on a
+ * connected QP before an RTU has been received.
+ */
+int ib_cm_establish(struct ib_cm_id *cm_id);
+
+/**
+ * ib_send_cm_rej - Sends a connection rejection message to the
+ * remote node.
+ * @cm_id: Connection identifier associated with the connection being
+ * rejected.
+ * @reason: Reason for the connection request rejection.
+ * @ari: Optional additional rejection information.
+ * @ari_length: Size of the additional rejection information, in bytes.
+ * @private_data: Optional user-defined private data sent with the
+ * rejection message.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ */
+int ib_send_cm_rej(struct ib_cm_id *cm_id,
+		   enum ib_cm_rej_reason reason,
+		   void *ari,
+		   u8 ari_length,
+		   const void *private_data,
+		   u8 private_data_len);
+
+/**
+ * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
+ * message.
+ * @cm_id: Connection identifier associated with the connection message.
+ * @service_timeout: The maximum time required for the sender to reply
+ * to the connection message.
+ * @private_data: Optional user-defined private data sent with the
+ * message receipt acknowledgement.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ */
+int ib_send_cm_mra(struct ib_cm_id *cm_id,
+		   u8 service_timeout,
+		   const void *private_data,
+		   u8 private_data_len);
+
+/**
+ * ib_send_cm_lap - Sends a load alternate path request.
+ * @cm_id: Connection identifier associated with the load alternate path
+ * message.
+ * @alternate_path: A path record that identifies the alternate path to
+ * load.
+ * @private_data: Optional user-defined private data sent with the
+ * load alternate path message.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ */
+int ib_send_cm_lap(struct ib_cm_id *cm_id,
+		   struct ib_sa_path_rec *alternate_path,
+		   const void *private_data,
+		   u8 private_data_len);
+
+/**
+ * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
+ * to a specified QP state.
+ * @cm_id: Communication identifier associated with the QP attributes to
+ * initialize.
+ * @qp_attr: On input, specifies the desired QP state. On output, the
+ * mandatory and desired optional attributes will be set in order to
+ * modify the QP to the specified state.
+ * @qp_attr_mask: The QP attribute mask that may be used to transition the
+ * QP to the specified state.
+ *
+ * Users must set the @qp_attr->qp_state to the desired QP state. This call
+ * will set all required attributes for the given transition, along with
+ * known optional attributes. Users may override the attributes returned from
+ * this call before calling ib_modify_qp.
+ */
+int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
+		       struct ib_qp_attr *qp_attr,
+		       int *qp_attr_mask);
+
+/**
+ * ib_send_cm_apr - Sends an alternate path response message in response to
+ * a load alternate path request.
+ * @cm_id: Connection identifier associated with the alternate path response.
+ * @status: Reply status sent with the alternate path response.
+ * @info: Optional additional information sent with the alternate path
+ * response.
+ * @info_length: Size of the additional information, in bytes.
+ * @private_data: Optional user-defined private data sent with the
+ * alternate path response message.
+ * @private_data_len: Size of the private data buffer, in bytes.
+ */
+int ib_send_cm_apr(struct ib_cm_id *cm_id,
+		   enum ib_cm_apr_status status,
+		   void *info,
+		   u8 info_length,
+		   const void *private_data,
+		   u8 private_data_len);
+
+struct ib_cm_sidr_req_param {
+	struct ib_sa_path_rec *path;
+	__be64 service_id;
+	int timeout_ms;
+	const void *private_data;
+	u8 private_data_len;
+	u8 max_cm_retries;
+	u16 pkey;
+};
+
+/**
+ * ib_send_cm_sidr_req - Sends a service ID resolution request to the
+ * remote node.
+ * @cm_id: Communication identifier that will be associated with the
+ * service ID resolution request.
+ * @param: Service ID resolution request information.
+ */
+int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
+			struct ib_cm_sidr_req_param *param);
+
+struct ib_cm_sidr_rep_param {
+	u32 qp_num;
+	u32 qkey;
+	enum ib_cm_sidr_status status;
+	const void *info;
+	u8 info_length;
+	const void *private_data;
+	u8 private_data_len;
+};
+
+/**
+ * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
+ * remote node.
+ * @cm_id: Communication identifier associated with the received service ID
+ * resolution request.
+ * @param: Service ID resolution reply information.
+ */
+int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+			struct ib_cm_sidr_rep_param *param);
+
+#endif /* IB_CM_H */
diff --git a/branches/MTHCA/inc/kernel/mthca/ib_fmr_pool.h b/branches/MTHCA/inc/kernel/mthca/ib_fmr_pool.h
new file mode 100644
index 00000000..6c9e24d6
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/ib_fmr_pool.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $ + */ + +#if !defined(IB_FMR_POOL_H) +#define IB_FMR_POOL_H + +#include + +struct ib_fmr_pool; + +/** + * struct ib_fmr_pool_param - Parameters for creating FMR pool + * @max_pages_per_fmr:Maximum number of pages per map request. + * @access:Access flags for FMRs in pool. + * @pool_size:Number of FMRs to allocate for pool. + * @dirty_watermark:Flush is triggered when @dirty_watermark dirty + * FMRs are present. + * @flush_function:Callback called when unmapped FMRs are flushed and + * more FMRs are possibly available for mapping + * @flush_arg:Context passed to user's flush function. + * @cache:If set, FMRs may be reused after unmapping for identical map + * requests. + */ +struct ib_fmr_pool_param { + int max_pages_per_fmr; + enum ib_access_flags access; + int pool_size; + int dirty_watermark; + void (*flush_function)(struct ib_fmr_pool *pool, + void * arg); + void *flush_arg; + unsigned cache:1; +}; + +struct ib_pool_fmr { + struct ib_fmr *fmr; + struct ib_fmr_pool *pool; + struct list_head list; + struct hlist_node cache_node; + int ref_count; + int remap_count; + u64 io_virtual_address; + int page_list_len; + u64 page_list[0]; +}; + +struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, + struct ib_fmr_pool_param *params); + +void ib_destroy_fmr_pool(struct ib_fmr_pool *pool); + +int ib_flush_fmr_pool(struct ib_fmr_pool *pool); + +struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, + u64 *page_list, + int list_len, + u64 *io_virtual_address); + +int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); + +#endif /* IB_FMR_POOL_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_mad.h b/branches/MTHCA/inc/kernel/mthca/ib_mad.h new file mode 100644 index 00000000..362c2778 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_mad.h @@ -0,0 +1,584 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_mad.h 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#if !defined( IB_MAD_H ) +#define IB_MAD_H + +#include + +/* Management base version */ +#define IB_MGMT_BASE_VERSION 1 + +/* Management classes */ +#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 +#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81 +#define IB_MGMT_CLASS_SUBN_ADM 0x03 +#define IB_MGMT_CLASS_PERF_MGMT 0x04 +#define IB_MGMT_CLASS_BM 0x05 +#define IB_MGMT_CLASS_DEVICE_MGMT 0x06 +#define IB_MGMT_CLASS_CM 0x07 +#define IB_MGMT_CLASS_SNMP 0x08 +#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 +#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F + +#define IB_OPENIB_OUI (0x001405) + +/* Management methods */ +#define IB_MGMT_METHOD_GET 0x01 +#define IB_MGMT_METHOD_SET 0x02 +#define IB_MGMT_METHOD_GET_RESP 0x81 +#define IB_MGMT_METHOD_SEND 0x03 +#define IB_MGMT_METHOD_TRAP 0x05 +#define IB_MGMT_METHOD_REPORT 0x06 +#define IB_MGMT_METHOD_REPORT_RESP 0x86 +#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 + +#define IB_MGMT_METHOD_RESP 0x80 + +#define IB_MGMT_MAX_METHODS 128 + +/* RMPP information */ +#define IB_MGMT_RMPP_VERSION 1 + +#define IB_MGMT_RMPP_TYPE_DATA 1 +#define IB_MGMT_RMPP_TYPE_ACK 2 +#define IB_MGMT_RMPP_TYPE_STOP 3 +#define IB_MGMT_RMPP_TYPE_ABORT 4 + +#define IB_MGMT_RMPP_FLAG_ACTIVE 1 +#define IB_MGMT_RMPP_FLAG_FIRST (1<<1) +#define IB_MGMT_RMPP_FLAG_LAST (1<<2) + +#define IB_MGMT_RMPP_NO_RESPTIME 0x1F + +#define IB_MGMT_RMPP_STATUS_SUCCESS 0 +#define IB_MGMT_RMPP_STATUS_RESX 1 +#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118 +#define IB_MGMT_RMPP_STATUS_T2L 118 +#define IB_MGMT_RMPP_STATUS_BAD_LEN 119 +#define IB_MGMT_RMPP_STATUS_BAD_SEG 120 +#define IB_MGMT_RMPP_STATUS_BADT 121 +#define IB_MGMT_RMPP_STATUS_W2S 122 +#define IB_MGMT_RMPP_STATUS_S2B 123 +#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124 +#define IB_MGMT_RMPP_STATUS_UNV 125 +#define IB_MGMT_RMPP_STATUS_TMR 126 +#define IB_MGMT_RMPP_STATUS_UNSPEC 127 +#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 + +#ifdef LINUX_TO_BE_REMOVED +// defined in ib_types.h +#define IB_QP0 0 +#define IB_QP1 __constant_htonl(1) +#endif +#define IB_QP1_QKEY 0x80010000 +#define IB_QP_SET_QKEY 0x80000000 + +struct ib_mad_hdr { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; +}; + +struct ib_rmpp_hdr { + u8 rmpp_version; + u8 rmpp_type; + u8 rmpp_rtime_flags; + u8 rmpp_status; + __be32 seg_num; + __be32 paylen_newwin; +}; + +typedef u64 __bitwise ib_sa_comp_mask; + +#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n)) + +/* + * ib_sa_hdr and ib_sa_mad structures must be packed because they have + * 64-bit fields that are only 32-bit aligned. 64-bit architectures will + * lay them out wrong otherwise. 
(And unfortunately they are sent on + * the wire so we can't change the layout) + */ +#pragma pack(push,1) +struct ib_sa_hdr { + __be64 sm_key; + __be16 attr_offset; + __be16 reserved; + ib_sa_comp_mask comp_mask; +} __attribute__ ((packed)); +#pragma pack(pop) + +struct ib_mad { + struct ib_mad_hdr mad_hdr; + u8 data[232]; +}; + +struct ib_rmpp_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 data[220]; +}; + +#pragma pack(push,1) +struct ib_sa_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + struct ib_sa_hdr sa_hdr; + u8 data[200]; +} __attribute__ ((packed)); +#pragma pack(pop) + +struct ib_vendor_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 reserved; + u8 oui[3]; + u8 data[216]; +}; + +/** + * ib_mad_send_buf - MAD data buffer and work request for sends. + * @mad: References an allocated MAD data buffer. The size of the data + * buffer is specified in the @send_wr.length field. + * @mapping: DMA mapping information. + * @mad_agent: MAD agent that allocated the buffer. + * @context: User-controlled context fields. + * @send_wr: An initialized work request structure used when sending the MAD. + * The wr_id field of the work request is initialized to reference this + * data structure. + * @sge: A scatter-gather list referenced by the work request. + * + * Users are responsible for initializing the MAD buffer itself, with the + * exception of specifying the payload length field in any RMPP MAD. + */ +struct ib_mad_send_buf { + struct ib_mad *mad; + dma_addr_t mapping; + struct ib_mad_agent *mad_agent; + void *context[2]; + struct ib_send_wr send_wr; + struct ib_sge sge; +}; + +/** + * ib_get_rmpp_resptime - Returns the RMPP response time. + * @rmpp_hdr: An RMPP header. + */ +static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr) +{ + return rmpp_hdr->rmpp_rtime_flags >> 3; +} + +/** + * ib_get_rmpp_flags - Returns the RMPP flags. + * @rmpp_hdr: An RMPP header. + */ +static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr) +{ + return rmpp_hdr->rmpp_rtime_flags & 0x7; +} + +/** + * ib_set_rmpp_resptime - Sets the response time in an RMPP header. + * @rmpp_hdr: An RMPP header. + * @rtime: The response time to set. + */ +static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime) +{ + rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3); +} + +/** + * ib_set_rmpp_flags - Sets the flags in an RMPP header. + * @rmpp_hdr: An RMPP header. + * @flags: The flags to set. + */ +static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags) +{ + rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) | + (flags & 0x7); +} + +struct ib_mad_agent; +struct ib_mad_send_wc; +struct ib_mad_recv_wc; + +/** + * ib_mad_send_handler - callback handler for a sent MAD. + * @mad_agent: MAD agent that sent the MAD. + * @mad_send_wc: Send work completion information on the sent MAD. + */ +typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, + struct ib_mad_send_wc *mad_send_wc); + +/** + * ib_mad_snoop_handler - Callback handler for snooping sent MADs. + * @mad_agent: MAD agent that snooped the MAD. + * @send_wr: Work request information on the sent MAD. + * @mad_send_wc: Work completion information on the sent MAD. Valid + * only for snooping that occurs on a send completion. + * + * Clients snooping MADs should not modify data referenced by the @send_wr + * or @mad_send_wc. 
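+ *
+ * A minimal read-only handler sketch; the body is a placeholder:
+ *
+ *	static void my_snoop(struct ib_mad_agent *mad_agent,
+ *			     struct ib_send_wr *send_wr,
+ *			     struct ib_mad_send_wc *mad_send_wc)
+ *	{
+ *		// inspect send_wr / mad_send_wc here, but do not modify them
+ *	}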
+ */
+typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
+				     struct ib_send_wr *send_wr,
+				     struct ib_mad_send_wc *mad_send_wc);
+
+/**
+ * ib_mad_recv_handler - callback handler for a received MAD.
+ * @mad_agent: MAD agent requesting the received MAD.
+ * @mad_recv_wc: Received work completion information on the received MAD.
+ *
+ * MADs received in response to a send request operation will be handed to
+ * the user after the send operation completes. All data buffers given
+ * to registered agents through this routine are owned by the receiving
+ * client, except for snooping agents. Clients snooping MADs should not
+ * modify the data referenced by @mad_recv_wc.
+ */
+typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
+				    struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_mad_agent - Used to track MAD registration with the access layer.
+ * @device: Reference to device registration is on.
+ * @qp: Reference to QP used for sending and receiving MADs.
+ * @mr: Memory region for system memory usable for DMA.
+ * @recv_handler: Callback handler for a received MAD.
+ * @send_handler: Callback handler for a sent MAD.
+ * @snoop_handler: Callback handler for snooped sent MADs.
+ * @context: User-specified context associated with this registration.
+ * @hi_tid: Access layer assigned transaction ID for this client.
+ * Unsolicited MADs sent by this client will have the upper 32-bits
+ * of their TID set to this value.
+ * @port_num: Port number on which QP is registered
+ * @rmpp_version: If set, indicates the RMPP version used by this agent.
+ */
+struct ib_mad_agent {
+	struct ib_device *device;
+	struct ib_qp *qp;
+	struct ib_mr *mr;
+	ib_mad_recv_handler recv_handler;
+	ib_mad_send_handler send_handler;
+	ib_mad_snoop_handler snoop_handler;
+	void *context;
+	u32 hi_tid;
+	u8 port_num;
+	u8 rmpp_version;
+};
+
+/**
+ * ib_mad_send_wc - MAD send completion information.
+ * @wr_id: Work request identifier associated with the send MAD request.
+ * @status: Completion status.
+ * @vendor_err: Optional vendor error information returned with a failed
+ * request.
+ */
+struct ib_mad_send_wc {
+	u64 wr_id;
+	enum ib_wc_status status;
+	u32 vendor_err;
+};
+
+/**
+ * ib_mad_recv_buf - received MAD buffer information.
+ * @list: Reference to next data buffer for a received RMPP MAD.
+ * @grh: References a data buffer containing the global route header.
+ * The data referenced by this buffer is only valid if the GRH is
+ * valid.
+ * @mad: References the start of the received MAD.
+ */
+struct ib_mad_recv_buf {
+	struct list_head list;
+	struct ib_grh *grh;
+	struct ib_mad *mad;
+};
+
+/**
+ * ib_mad_recv_wc - received MAD information.
+ * @wc: Completion information for the received data.
+ * @recv_buf: Specifies the location of the received data buffer(s).
+ * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
+ * @mad_len: The length of the received MAD, without duplicated headers.
+ *
+ * For a received response, the wr_id field of the wc is set to the wr_id
+ * for the corresponding send request.
+ */
+struct ib_mad_recv_wc {
+	struct ib_wc *wc;
+	struct ib_mad_recv_buf recv_buf;
+	struct list_head rmpp_list;
+	int mad_len;
+};
+
+/**
+ * ib_mad_reg_req - MAD registration request
+ * @mgmt_class: Indicates which management class of MADs should be received
+ * by the caller. This field is only required if the user wishes to
+ * receive unsolicited MADs, otherwise it should be 0.
+ * @mgmt_class_version: Indicates which version of MADs for the given + * management class to receive. + * @oui: Indicates IEEE OUI when mgmt_class is a vendor class + * in the range from 0x30 to 0x4f. Otherwise not used. + * @method_mask: The caller will receive unsolicited MADs for any method + * where @method_mask = 1. + */ +struct ib_mad_reg_req { + u8 mgmt_class; + u8 mgmt_class_version; + u8 oui[3]; + DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS); +}; + +/** + * ib_register_mad_agent - Register to send/receive MADs. + * @device: The device to register with. + * @port_num: The port on the specified device to use. + * @qp_type: Specifies which QP to access. Must be either + * IB_QPT_QP0 or IB_QPT_QP1. + * @mad_reg_req: Specifies which unsolicited MADs should be received + * by the caller. This parameter may be NULL if the caller only + * wishes to receive solicited responses. + * @rmpp_version: If set, indicates that the client will send + * and receive MADs that contain the RMPP header for the given version. + * If set to 0, indicates that RMPP is not used by this client. + * @send_handler: The completion callback routine invoked after a send + * request has completed. + * @recv_handler: The completion callback routine invoked for a received + * MAD. + * @context: User specified context associated with the registration. + */ +struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, + u8 port_num, + enum ib_qp_type_t qp_type, + struct ib_mad_reg_req *mad_reg_req, + u8 rmpp_version, + ib_mad_send_handler send_handler, + ib_mad_recv_handler recv_handler, + void *context); + +enum ib_mad_snoop_flags { + /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ + /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/ + IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2), + /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/ + IB_MAD_SNOOP_RECVS = (1<<4) + /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/ + /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/ +}; + +/** + * ib_register_mad_snoop - Register to snoop sent and received MADs. + * @device: The device to register with. + * @port_num: The port on the specified device to use. + * @qp_type: Specifies which QP traffic to snoop. Must be either + * IB_QPT_QP0 or IB_QPT_QP1. + * @mad_snoop_flags: Specifies information where snooping occurs. + * @send_handler: The callback routine invoked for a snooped send. + * @recv_handler: The callback routine invoked for a snooped receive. + * @context: User specified context associated with the registration. + */ +struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, + u8 port_num, + enum ib_qp_type_t qp_type, + int mad_snoop_flags, + ib_mad_snoop_handler snoop_handler, + ib_mad_recv_handler recv_handler, + void *context); + +/** + * ib_unregister_mad_agent - Unregisters a client from using MAD services. + * @mad_agent: Corresponding MAD registration request to deregister. + * + * After invoking this routine, MAD services are no longer usable by the + * client on the associated QP. + */ +int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent); + +/** + * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated + * with the registered client. + * @mad_agent: Specifies the associated registration to post the send to. + * @send_wr: Specifies the information needed to send the MAD(s). + * @bad_send_wr: Specifies the MAD on which an error was encountered. + * + * Sent MADs are not guaranteed to complete in the order that they were posted. 
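+ *
+ * A minimal sketch, assuming 'buf' was returned by ib_create_send_mad()
+ * and its headers have already been filled in; error handling is elided:
+ *
+ *	struct ib_send_wr *bad_wr;
+ *	int ret = ib_post_send_mad(mad_agent, &buf->send_wr, &bad_wr);
+ *	// on failure (ret != 0), bad_wr points at the offending request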
+ *
+ * If the MAD requires RMPP, the data buffer should contain a single copy
+ * of the common MAD, RMPP, and class specific headers, followed by the class
+ * defined data. If the class defined data would not divide evenly into
+ * RMPP segments, then space must be allocated at the end of the referenced
+ * buffer for any required padding. To indicate the amount of class defined
+ * data being transferred, the paylen_newwin field in the RMPP header should
+ * be set to the size of the class specific header plus the amount of class
+ * defined data being transferred. The paylen_newwin field should be
+ * specified in network-byte order.
+ */
+int ib_post_send_mad(struct ib_mad_agent *mad_agent,
+		     struct ib_send_wr *send_wr,
+		     struct ib_send_wr **bad_send_wr);
+
+/**
+ * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
+ * @mad_recv_wc: Work completion information for a received MAD.
+ * @buf: User-provided data buffer to receive the coalesced buffers. The
+ * referenced buffer should be at least the size of the mad_len specified
+ * by @mad_recv_wc.
+ *
+ * This call copies a chain of received MAD segments into a single data buffer,
+ * removing duplicated headers.
+ */
+void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
+
+/**
+ * ib_free_recv_mad - Returns data buffers used to receive a MAD.
+ * @mad_recv_wc: Work completion information for a received MAD.
+ *
+ * Clients receiving MADs through their ib_mad_recv_handler must call this
+ * routine to return the work completion buffers to the access layer.
+ */
+void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_cancel_mad - Cancels an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to cancel.
+ *
+ * MADs will be returned to the user through the corresponding
+ * ib_mad_send_handler.
+ */
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
+
+/**
+ * ib_modify_mad - Modifies an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to modify.
+ * @timeout_ms: New timeout value for sent MAD.
+ *
+ * This call will reset the timeout value for a sent MAD to the specified
+ * value.
+ */
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
+
+/**
+ * ib_redirect_mad_qp - Registers a QP for MAD services.
+ * @qp: Reference to a QP that requires MAD services.
+ * @rmpp_version: If set, indicates that the client will send
+ * and receive MADs that contain the RMPP header for the given version.
+ * If set to 0, indicates that RMPP is not used by this client.
+ * @send_handler: The completion callback routine invoked after a send
+ * request has completed.
+ * @recv_handler: The completion callback routine invoked for a received
+ * MAD.
+ * @context: User specified context associated with the registration.
+ *
+ * Use of this call allows clients to use MAD services, such as RMPP,
+ * on user-owned QPs. After calling this routine, users may send
+ * MADs on the specified QP by calling ib_post_send_mad.
+ */
+struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
+					u8 rmpp_version,
+					ib_mad_send_handler send_handler,
+					ib_mad_recv_handler recv_handler,
+					void *context);
+
+/**
+ * ib_process_mad_wc - Processes a work completion associated with a
+ * MAD sent or received on a redirected QP.
+ * @mad_agent: Specifies the registered MAD service using the redirected QP. + * @wc: References a work completion associated with a sent or received + * MAD segment. + * + * This routine is used to complete or continue processing on a MAD request. + * If the work completion is associated with a send operation, calling + * this routine is required to continue an RMPP transfer or to wait for a + * corresponding response, if it is a request. If the work completion is + * associated with a receive operation, calling this routine is required to + * process an inbound or outbound RMPP transfer, or to match a response MAD + * with its corresponding request. + */ +int ib_process_mad_wc(struct ib_mad_agent *mad_agent, + struct ib_wc *wc); + +/** + * ib_create_send_mad - Allocate and initialize a data buffer and work request + * for sending a MAD. + * @mad_agent: Specifies the registered MAD service to associate with the MAD. + * @remote_qpn: Specifies the QPN of the receiving node. + * @pkey_index: Specifies which PKey the MAD will be sent using. This field + * is valid only if the remote_qpn is QP 1. + * @ah: References the address handle used to transfer to the remote node. + * @rmpp_active: Indicates if the send will enable RMPP. + * @hdr_len: Indicates the size of the data header of the MAD. This length + * should include the common MAD header, RMPP header, plus any class + * specific header. + * @data_len: Indicates the size of any user-transferred data. The call will + * automatically adjust the allocated buffer size to account for any + * additional padding that may be necessary. + * @gfp_mask: GFP mask used for the memory allocation. + * + * This is a helper routine that may be used to allocate a MAD. Users are + * not required to allocate outbound MADs using this call. The returned + * MAD send buffer will reference a data buffer usable for sending a MAD, along + * with an initialized work request structure. Users may modify the returned + * MAD data buffer or work request before posting the send. + * + * The returned data buffer will be cleared. Users are responsible for + * initializing the common MAD and any class specific headers. If @rmpp_active + * is set, the RMPP header will be initialized for sending. + */ +struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, + u32 remote_qpn, u16 pkey_index, + struct ib_ah *ah, int rmpp_active, + int hdr_len, int data_len, + unsigned int __nocast gfp_mask); + +/** + * ib_free_send_mad - Returns data buffers used to send a MAD. + * @send_buf: Previously allocated send data buffer. + */ +void ib_free_send_mad(struct ib_mad_send_buf *send_buf); + +#endif /* IB_MAD_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_pack.h b/branches/MTHCA/inc/kernel/mthca/ib_pack.h new file mode 100644 index 00000000..fe480f3e --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_pack.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $ + */ + +#ifndef IB_PACK_H +#define IB_PACK_H + +#include + +enum { + IB_LRH_BYTES = 8, + IB_GRH_BYTES = 40, + IB_BTH_BYTES = 12, + IB_DETH_BYTES = 8 +}; + +struct ib_field { + size_t struct_offset_bytes; + size_t struct_size_bytes; + int offset_words; + int offset_bits; + int size_bits; + char *field_name; +}; + +#define RESERVED \ + .field_name = "reserved" + +/* + * This macro cleans up the definitions of constants for BTH opcodes. + * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY, + * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives + * the correct value. + * + * In short, user code should use the constants defined using the + * macro rather than worrying about adding together other constants. 
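+ *
+ * For example:
+ *
+ *	IB_OPCODE(UD, SEND_ONLY)
+ *
+ * expands to
+ *
+ *	IB_OPCODE_UD_SEND_ONLY = IB_OPCODE_UD + IB_OPCODE_SEND_ONLY
+ *
+ * which, with the values below, is 0x60 + 0x04 = 0x64.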
+*/ +#define IB_OPCODE(transport, op) \ + IB_OPCODE_ ## transport ## _ ## op = \ + IB_OPCODE_ ## transport + IB_OPCODE_ ## op + +enum { + /* transport types -- just used to define real constants */ + IB_OPCODE_RC = 0x00, + IB_OPCODE_UC = 0x20, + IB_OPCODE_RD = 0x40, + IB_OPCODE_UD = 0x60, + + /* operations -- just used to define real constants */ + IB_OPCODE_SEND_FIRST = 0x00, + IB_OPCODE_SEND_MIDDLE = 0x01, + IB_OPCODE_SEND_LAST = 0x02, + IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03, + IB_OPCODE_SEND_ONLY = 0x04, + IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05, + IB_OPCODE_RDMA_WRITE_FIRST = 0x06, + IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07, + IB_OPCODE_RDMA_WRITE_LAST = 0x08, + IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09, + IB_OPCODE_RDMA_WRITE_ONLY = 0x0a, + IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b, + IB_OPCODE_RDMA_READ_REQUEST = 0x0c, + IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d, + IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e, + IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f, + IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10, + IB_OPCODE_ACKNOWLEDGE = 0x11, + IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, + IB_OPCODE_COMPARE_SWAP = 0x13, + IB_OPCODE_FETCH_ADD = 0x14, + + /* real constants follow -- see comment about above IB_OPCODE() + macro for more details */ + + /* RC */ + IB_OPCODE(RC, SEND_FIRST), + IB_OPCODE(RC, SEND_MIDDLE), + IB_OPCODE(RC, SEND_LAST), + IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(RC, SEND_ONLY), + IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_WRITE_FIRST), + IB_OPCODE(RC, RDMA_WRITE_MIDDLE), + IB_OPCODE(RC, RDMA_WRITE_LAST), + IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_WRITE_ONLY), + IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_READ_REQUEST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE), + IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY), + IB_OPCODE(RC, ACKNOWLEDGE), + IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE), + IB_OPCODE(RC, COMPARE_SWAP), + IB_OPCODE(RC, FETCH_ADD), + + /* UC */ + IB_OPCODE(UC, SEND_FIRST), + IB_OPCODE(UC, SEND_MIDDLE), + IB_OPCODE(UC, SEND_LAST), + IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(UC, SEND_ONLY), + IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(UC, RDMA_WRITE_FIRST), + IB_OPCODE(UC, RDMA_WRITE_MIDDLE), + IB_OPCODE(UC, RDMA_WRITE_LAST), + IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(UC, RDMA_WRITE_ONLY), + IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + + /* RD */ + IB_OPCODE(RD, SEND_FIRST), + IB_OPCODE(RD, SEND_MIDDLE), + IB_OPCODE(RD, SEND_LAST), + IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(RD, SEND_ONLY), + IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_WRITE_FIRST), + IB_OPCODE(RD, RDMA_WRITE_MIDDLE), + IB_OPCODE(RD, RDMA_WRITE_LAST), + IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_WRITE_ONLY), + IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_READ_REQUEST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE), + IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY), + IB_OPCODE(RD, ACKNOWLEDGE), + IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE), + IB_OPCODE(RD, COMPARE_SWAP), + IB_OPCODE(RD, FETCH_ADD), + + /* UD */ + IB_OPCODE(UD, SEND_ONLY), + IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE) +}; + +enum { + IB_LNH_RAW = 0, + IB_LNH_IP = 1, + IB_LNH_IBA_LOCAL = 2, + IB_LNH_IBA_GLOBAL = 3 +}; + +struct ib_unpacked_lrh { + u8 virtual_lane; + u8 link_version; + u8 service_level; + u8 
link_next_header; + __be16 destination_lid; + __be16 packet_length; + __be16 source_lid; +}; + +struct ib_unpacked_grh { + u8 ip_version; + u8 traffic_class; + __be32 flow_label; + __be16 payload_length; + u8 next_header; + u8 hop_limit; + union ib_gid source_gid; + union ib_gid destination_gid; +}; + +struct ib_unpacked_bth { + u8 opcode; + u8 solicited_event; + u8 mig_req; + u8 pad_count; + u8 transport_header_version; + __be16 pkey; + __be32 destination_qpn; + u8 ack_req; + __be32 psn; +}; + +struct ib_unpacked_deth { + __be32 qkey; + __be32 source_qpn; +}; + +struct ib_ud_header { + struct ib_unpacked_lrh lrh; + int grh_present; + struct ib_unpacked_grh grh; + struct ib_unpacked_bth bth; + struct ib_unpacked_deth deth; + int immediate_present; + __be32 immediate_data; +}; + +void ib_pack(const struct ib_field *desc, + int desc_len, + void *structure, + void *buf); + +void ib_unpack(const struct ib_field *desc, + int desc_len, + void *buf, + void *structure); + +void ib_ud_header_init(int payload_bytes, + int grh_present, + struct ib_ud_header *header); + +int ib_ud_header_pack(struct ib_ud_header *header, + void *buf); + +int ib_ud_header_unpack(void *buf, + struct ib_ud_header *header); + +#endif /* IB_PACK_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_sa.h b/branches/MTHCA/inc/kernel/mthca/ib_sa.h new file mode 100644 index 00000000..da6fd50d --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_sa.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_sa.h 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#ifndef IB_SA_H +#define IB_SA_H + +#include +#include + +enum { + IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */ + + IB_SA_METHOD_GET_TABLE = 0x12, + IB_SA_METHOD_GET_TABLE_RESP = 0x92, + IB_SA_METHOD_DELETE = 0x15 +}; + +enum ib_sa_selector { + IB_SA_GTE = 0, + IB_SA_LTE = 1, + IB_SA_EQ = 2, + /* + * The meaning of "best" depends on the attribute: for + * example, for MTU best will return the largest available + * MTU, while for packet life time, best will return the + * smallest available life time. 
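+	 *
+	 * Illustrative sketch: to ask for paths with an MTU of at
+	 * least 1024, a caller would set mtu_selector = IB_SA_GTE and
+	 * mtu to the ib_mtu encoding of 1024 in the path record, and
+	 * set both IB_SA_PATH_REC_MTU_SELECTOR and IB_SA_PATH_REC_MTU
+	 * in the component mask.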
+ */ + IB_SA_BEST = 3 +}; + +enum ib_sa_rate { + IB_SA_RATE_2_5_GBPS = 2, + IB_SA_RATE_5_GBPS = 5, + IB_SA_RATE_10_GBPS = 3, + IB_SA_RATE_20_GBPS = 6, + IB_SA_RATE_30_GBPS = 4, + IB_SA_RATE_40_GBPS = 7, + IB_SA_RATE_60_GBPS = 8, + IB_SA_RATE_80_GBPS = 9, + IB_SA_RATE_120_GBPS = 10 +}; + +static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate) +{ + switch (rate) { + case IB_SA_RATE_2_5_GBPS: return 1; + case IB_SA_RATE_5_GBPS: return 2; + case IB_SA_RATE_10_GBPS: return 4; + case IB_SA_RATE_20_GBPS: return 8; + case IB_SA_RATE_30_GBPS: return 12; + case IB_SA_RATE_40_GBPS: return 16; + case IB_SA_RATE_60_GBPS: return 24; + case IB_SA_RATE_80_GBPS: return 32; + case IB_SA_RATE_120_GBPS: return 48; + default: return -1; + } +} + +/* + * Structures for SA records are named "struct ib_sa_xxx_rec." No + * attempt is made to pack structures to match the physical layout of + * SA records in SA MADs; all packing and unpacking is handled by the + * SA query code. + * + * For a record with structure ib_sa_xxx_rec, the naming convention + * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we + * never use different abbreviations or otherwise change the spelling + * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY). + * + * Reserved rows are indicated with comments to help maintainability. + */ + +/* reserved: 0 */ +/* reserved: 1 */ +#define IB_SA_PATH_REC_DGID IB_SA_COMP_MASK( 2) +#define IB_SA_PATH_REC_SGID IB_SA_COMP_MASK( 3) +#define IB_SA_PATH_REC_DLID IB_SA_COMP_MASK( 4) +#define IB_SA_PATH_REC_SLID IB_SA_COMP_MASK( 5) +#define IB_SA_PATH_REC_RAW_TRAFFIC IB_SA_COMP_MASK( 6) +/* reserved: 7 */ +#define IB_SA_PATH_REC_FLOW_LABEL IB_SA_COMP_MASK( 8) +#define IB_SA_PATH_REC_HOP_LIMIT IB_SA_COMP_MASK( 9) +#define IB_SA_PATH_REC_TRAFFIC_CLASS IB_SA_COMP_MASK(10) +#define IB_SA_PATH_REC_REVERSIBLE IB_SA_COMP_MASK(11) +#define IB_SA_PATH_REC_NUMB_PATH IB_SA_COMP_MASK(12) +#define IB_SA_PATH_REC_PKEY IB_SA_COMP_MASK(13) +/* reserved: 14 */ +#define IB_SA_PATH_REC_SL IB_SA_COMP_MASK(15) +#define IB_SA_PATH_REC_MTU_SELECTOR IB_SA_COMP_MASK(16) +#define IB_SA_PATH_REC_MTU IB_SA_COMP_MASK(17) +#define IB_SA_PATH_REC_RATE_SELECTOR IB_SA_COMP_MASK(18) +#define IB_SA_PATH_REC_RATE IB_SA_COMP_MASK(19) +#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(20) +#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21) +#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22) + +struct ib_sa_path_rec { + /* reserved */ + /* reserved */ + union ib_gid dgid; + union ib_gid sgid; + __be16 dlid; + __be16 slid; + int raw_traffic; + /* reserved */ + __be32 flow_label; + u8 hop_limit; + u8 traffic_class; + int reversible; + u8 numb_path; + __be16 pkey; + /* reserved */ + u8 sl; + u8 mtu_selector; + u8 mtu; + u8 rate_selector; + u8 rate; + u8 packet_life_time_selector; + u8 packet_life_time; + u8 preference; +}; + +#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0) +#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1) +#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2) +#define IB_SA_MCMEMBER_REC_MLID IB_SA_COMP_MASK( 3) +#define IB_SA_MCMEMBER_REC_MTU_SELECTOR IB_SA_COMP_MASK( 4) +#define IB_SA_MCMEMBER_REC_MTU IB_SA_COMP_MASK( 5) +#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS IB_SA_COMP_MASK( 6) +#define IB_SA_MCMEMBER_REC_PKEY IB_SA_COMP_MASK( 7) +#define IB_SA_MCMEMBER_REC_RATE_SELECTOR IB_SA_COMP_MASK( 8) +#define IB_SA_MCMEMBER_REC_RATE IB_SA_COMP_MASK( 9) +#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(10) +#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME 
IB_SA_COMP_MASK(11) +#define IB_SA_MCMEMBER_REC_SL IB_SA_COMP_MASK(12) +#define IB_SA_MCMEMBER_REC_FLOW_LABEL IB_SA_COMP_MASK(13) +#define IB_SA_MCMEMBER_REC_HOP_LIMIT IB_SA_COMP_MASK(14) +#define IB_SA_MCMEMBER_REC_SCOPE IB_SA_COMP_MASK(15) +#define IB_SA_MCMEMBER_REC_JOIN_STATE IB_SA_COMP_MASK(16) +#define IB_SA_MCMEMBER_REC_PROXY_JOIN IB_SA_COMP_MASK(17) + +struct ib_sa_mcmember_rec { + union ib_gid mgid; + union ib_gid port_gid; + __be32 qkey; + __be16 mlid; + u8 mtu_selector; + u8 mtu; + u8 traffic_class; + __be16 pkey; + u8 rate_selector; + u8 rate; + u8 packet_life_time_selector; + u8 packet_life_time; + u8 sl; + __be32 flow_label; + u8 hop_limit; + u8 scope; + u8 join_state; + int proxy_join; +}; + +/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */ +#define IB_SA_SERVICE_REC_SERVICE_ID IB_SA_COMP_MASK( 0) +#define IB_SA_SERVICE_REC_SERVICE_GID IB_SA_COMP_MASK( 1) +#define IB_SA_SERVICE_REC_SERVICE_PKEY IB_SA_COMP_MASK( 2) +/* reserved: 3 */ +#define IB_SA_SERVICE_REC_SERVICE_LEASE IB_SA_COMP_MASK( 4) +#define IB_SA_SERVICE_REC_SERVICE_KEY IB_SA_COMP_MASK( 5) +#define IB_SA_SERVICE_REC_SERVICE_NAME IB_SA_COMP_MASK( 6) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_0 IB_SA_COMP_MASK( 7) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_1 IB_SA_COMP_MASK( 8) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_2 IB_SA_COMP_MASK( 9) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_3 IB_SA_COMP_MASK(10) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_4 IB_SA_COMP_MASK(11) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_5 IB_SA_COMP_MASK(12) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_6 IB_SA_COMP_MASK(13) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_7 IB_SA_COMP_MASK(14) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_8 IB_SA_COMP_MASK(15) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_9 IB_SA_COMP_MASK(16) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_10 IB_SA_COMP_MASK(17) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_11 IB_SA_COMP_MASK(18) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_12 IB_SA_COMP_MASK(19) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_13 IB_SA_COMP_MASK(20) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_14 IB_SA_COMP_MASK(21) +#define IB_SA_SERVICE_REC_SERVICE_DATA8_15 IB_SA_COMP_MASK(22) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_0 IB_SA_COMP_MASK(23) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_1 IB_SA_COMP_MASK(24) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_2 IB_SA_COMP_MASK(25) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_3 IB_SA_COMP_MASK(26) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_4 IB_SA_COMP_MASK(27) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_5 IB_SA_COMP_MASK(28) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_6 IB_SA_COMP_MASK(29) +#define IB_SA_SERVICE_REC_SERVICE_DATA16_7 IB_SA_COMP_MASK(30) +#define IB_SA_SERVICE_REC_SERVICE_DATA32_0 IB_SA_COMP_MASK(31) +#define IB_SA_SERVICE_REC_SERVICE_DATA32_1 IB_SA_COMP_MASK(32) +#define IB_SA_SERVICE_REC_SERVICE_DATA32_2 IB_SA_COMP_MASK(33) +#define IB_SA_SERVICE_REC_SERVICE_DATA32_3 IB_SA_COMP_MASK(34) +#define IB_SA_SERVICE_REC_SERVICE_DATA64_0 IB_SA_COMP_MASK(35) +#define IB_SA_SERVICE_REC_SERVICE_DATA64_1 IB_SA_COMP_MASK(36) + +#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF + +struct ib_sa_service_rec { + u64 id; + union ib_gid gid; + __be16 pkey; + /* reserved */ + u32 lease; + u8 key[16]; + u8 name[64]; + u8 data8[16]; + u16 data16[8]; + u32 data32[4]; + u64 data64[2]; +}; + +struct ib_sa_query; + +void ib_sa_cancel_query(int id, struct ib_sa_query *query); + +int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, + struct ib_sa_path_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, unsigned int 
__nocast gfp_mask, + void (*callback)(int status, + struct ib_sa_path_rec *resp, + void *context), + void *context, + struct ib_sa_query **query); + +int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, + u8 method, + struct ib_sa_mcmember_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, unsigned int __nocast gfp_mask, + void (*callback)(int status, + struct ib_sa_mcmember_rec *resp, + void *context), + void *context, + struct ib_sa_query **query); + +int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, + u8 method, + struct ib_sa_service_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, unsigned int __nocast gfp_mask, + void (*callback)(int status, + struct ib_sa_service_rec *resp, + void *context), + void *context, + struct ib_sa_query **sa_query); + +/** + * ib_sa_mcmember_rec_set - Start an MCMember set query + * @device:device to send query on + * @port_num: port number to send query on + * @rec:MCMember Record to send in query + * @comp_mask:component mask to send in query + * @timeout_ms:time to wait for response + * @gfp_mask:GFP mask to use for internal allocations + * @callback:function called when query completes, times out or is + * canceled + * @context:opaque user context passed to callback + * @sa_query:query context, used to cancel query + * + * Send an MCMember Set query to the SA (eg to join a multicast + * group). The callback function will be called when the query + * completes (or fails); status is 0 for a successful response, -EINTR + * if the query is canceled, -ETIMEDOUT is the query timed out, or + * -EIO if an error occurred sending the query. The resp parameter of + * the callback is only valid if status is 0. + * + * If the return value of ib_sa_mcmember_rec_set() is negative, it is + * an error code. Otherwise it is a query ID that can be used to + * cancel the query. + */ +static inline int +ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, + struct ib_sa_mcmember_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, unsigned int __nocast gfp_mask, + void (*callback)(int status, + struct ib_sa_mcmember_rec *resp, + void *context), + void *context, + struct ib_sa_query **query) +{ + return ib_sa_mcmember_rec_query(device, port_num, + IB_MGMT_METHOD_SET, + rec, comp_mask, + timeout_ms, gfp_mask, callback, + context, query); +} + +/** + * ib_sa_mcmember_rec_delete - Start an MCMember delete query + * @device:device to send query on + * @port_num: port number to send query on + * @rec:MCMember Record to send in query + * @comp_mask:component mask to send in query + * @timeout_ms:time to wait for response + * @gfp_mask:GFP mask to use for internal allocations + * @callback:function called when query completes, times out or is + * canceled + * @context:opaque user context passed to callback + * @sa_query:query context, used to cancel query + * + * Send an MCMember Delete query to the SA (eg to leave a multicast + * group). The callback function will be called when the query + * completes (or fails); status is 0 for a successful response, -EINTR + * if the query is canceled, -ETIMEDOUT is the query timed out, or + * -EIO if an error occurred sending the query. The resp parameter of + * the callback is only valid if status is 0. + * + * If the return value of ib_sa_mcmember_rec_delete() is negative, it + * is an error code. Otherwise it is a query ID that can be used to + * cancel the query. 
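+ *
+ * Minimal usage sketch (illustrative only -- my_callback and
+ * my_context are placeholder names, and error handling is elided):
+ *
+ *	struct ib_sa_query *query;
+ *	int id = ib_sa_mcmember_rec_delete(device, 1, &rec,
+ *		IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
+ *		IB_SA_MCMEMBER_REC_JOIN_STATE,
+ *		1000, GFP_KERNEL, my_callback, my_context, &query);
+ *	if (id < 0)
+ *		return id;	(the query was never sent)
+ *	... later, while still pending: ib_sa_cancel_query(id, query);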
+ */ +static inline int +ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, + struct ib_sa_mcmember_rec *rec, + ib_sa_comp_mask comp_mask, + int timeout_ms, unsigned int __nocast gfp_mask, + void (*callback)(int status, + struct ib_sa_mcmember_rec *resp, + void *context), + void *context, + struct ib_sa_query **query) +{ + return ib_sa_mcmember_rec_query(device, port_num, + IB_SA_METHOD_DELETE, + rec, comp_mask, + timeout_ms, gfp_mask, callback, + context, query); +} + + +#endif /* IB_SA_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_smi.h b/branches/MTHCA/inc/kernel/mthca/ib_smi.h new file mode 100644 index 00000000..eb402712 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_smi.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: ib_smi.h 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#if !defined( IB_SMI_H ) +#define IB_SMI_H + +#include + +#define IB_SMP_DATA_SIZE 64 +#define IB_SMP_MAX_PATH_HOPS 64 + +#pragma pack(push,1) +struct ib_smp { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + u8 hop_ptr; + u8 hop_cnt; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[IB_SMP_DATA_SIZE]; + u8 initial_path[IB_SMP_MAX_PATH_HOPS]; + u8 return_path[IB_SMP_MAX_PATH_HOPS]; +} __attribute__ ((packed)); +#pragma pack(pop) + + +/* Subnet management attributes */ +#define IB_SMP_ATTR_NOTICE cl_hton16(0x0002) +#define IB_SMP_ATTR_NODE_DESC cl_hton16(0x0010) +#define IB_SMP_ATTR_NODE_INFO cl_hton16(0x0011) +#define IB_SMP_ATTR_SWITCH_INFO cl_hton16(0x0012) +#define IB_SMP_ATTR_GUID_INFO cl_hton16(0x0014) +#define IB_SMP_ATTR_PORT_INFO cl_hton16(0x0015) +#define IB_SMP_ATTR_PKEY_TABLE cl_hton16(0x0016) +#define IB_SMP_ATTR_SL_TO_VL_TABLE cl_hton16(0x0017) +#define IB_SMP_ATTR_VL_ARB_TABLE cl_hton16(0x0018) +#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE cl_hton16(0x0019) +#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE cl_hton16(0x001A) +#define IB_SMP_ATTR_MCAST_FORWARD_TABLE cl_hton16(0x001B) +#define IB_SMP_ATTR_SM_INFO cl_hton16(0x0020) +#define IB_SMP_ATTR_VENDOR_DIAG cl_hton16(0x0030) +#define IB_SMP_ATTR_LED_INFO cl_hton16(0x0031) +#define IB_SMP_ATTR_VENDOR_MASK cl_hton16(0xFF00) + +static inline u8 +ib_get_smp_direction(struct ib_smp *smp) +{ + return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION); +} + +#endif /* IB_SMI_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_user_at.h b/branches/MTHCA/inc/kernel/mthca/ib_user_at.h new file mode 100644 index 00000000..071ea959 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_user_at.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: ib_user_at.h 2923 2005-07-28 13:01:04Z halr $ + */ + +#ifndef IB_USER_AT_H +#define IB_USER_AT_H + +#include + +#define IB_USER_AT_ABI_VERSION 1 + +enum { + IB_USER_AT_CMD_ROUTE_BY_IP, + IB_USER_AT_CMD_PATHS_BY_ROUTE, + IB_USER_AT_CMD_IPS_BY_GID, + IB_USER_AT_CMD_IPS_BY_SUBNET, + IB_USER_AT_CMD_INVALIDATE_PATHS, + IB_USER_AT_CMD_CANCEL, + IB_USER_AT_CMD_STATUS, + + IB_USER_AT_CMD_EVENT, +}; + +/* + * command ABI structures. + */ +struct ib_uat_cmd_hdr { + __u32 cmd; + __u16 in; + __u16 out; +}; + +enum ib_uat_multipathing_type { + IB_USER_AT_PATH_SAME_PORT = 0, + IB_USER_AT_PATH_SAME_HCA = 1, /* but different ports if applicable */ + IB_USER_AT_PATH_SAME_SYSTEM = 2, /* but different ports if applicable */ + IB_USER_AT_PATH_INDEPENDENT_HCA = 3, + IB_USER_AT_PATH_SRC_ROUTE = 4, /* application controlled multipathing */ +}; + +enum ib_uat_route_flags { + IB_USER_AT_ROUTE_USE_DEFAULTS = 0, + IB_USER_AT_ROUTE_FORCE_ATS = 1, + IB_USER_AT_ROUTE_FORCE_ARP = 2, + IB_USER_AT_ROUTE_FORCE_RESOLVE = 4, +}; + +struct ib_uat_path_attr { + __u16 qos_tag; + __u16 pkey; + __u8 multi_path_type; +}; + +struct ib_uat_ib_route { + __u8 sgid[16]; + __u8 dgid[16]; + struct ibv_device *out_dev; + int out_port; + struct ib_uat_path_attr attr; +}; + +enum ib_uat_op_status { + IB_USER_AT_STATUS_INVALID = 0, + IB_USER_AT_STATUS_PENDING = 1, + IB_USER_AT_STATUS_COMPLETED = 2, + IB_USER_AT_STATUS_CANCELED = 3, +}; + +struct ib_uat_completion { + void (*fn)(__u64 req_id, void *context, int rec_num); + void *context; + __u64 req_id; +}; + +struct ib_uat_paths_by_route_req { + struct ib_uat_ib_route *ib_route; + __u32 mpath_type; + struct ib_sa_path_rec *path_arr; + int npath; + struct ib_uat_completion *async_comp; + __u64 response; +}; + +struct ib_uat_paths_by_route_resp { + __u64 req_id; +}; + +struct ib_uat_route_by_ip_req { + __u32 dst_ip; + __u32 src_ip; + int tos; + __u16 flags; + struct ib_uat_ib_route *ib_route; + struct ib_uat_completion *async_comp; + __u64 response; +}; + +struct ib_uat_route_by_ip_resp { + __u64 req_id; +}; + +struct ib_uat_ips_by_gid_req { + union ibv_gid *gid; + __u32 *dst_ips; + int nips; + struct ib_uat_completion *async_comp; + __u64 response; +}; + +struct ib_uat_ips_by_gid_resp { + __u64 req_id; +}; + +struct ib_uat_ips_by_subnet_req { + __u32 network; + __u32 netmask; + __u32 *dst_ips; + int nips; +}; + +struct ib_uat_invalidate_paths_req { + struct ib_uat_ib_route *ib_route; +}; + +struct ib_uat_cancel_req { + __u64 req_id; +}; + +struct ib_uat_status_req { + __u64 req_id; +}; + +/* + * event notification ABI structures. + */ +struct ib_uat_event_get { + __u64 response; +}; + +struct ib_uat_event_resp { + __u64 callback; + __u64 context; + __u64 req_id; + int rec_num; +}; +#endif /* IB_USER_AT_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_user_cm.h b/branches/MTHCA/inc/kernel/mthca/ib_user_cm.h new file mode 100644 index 00000000..3c78ad78 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_user_cm.h @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_user_cm.h 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#ifndef IB_USER_CM_H +#define IB_USER_CM_H + +#define IB_USER_CM_ABI_VERSION 1 + +enum { + IB_USER_CM_CMD_CREATE_ID, + IB_USER_CM_CMD_DESTROY_ID, + IB_USER_CM_CMD_ATTR_ID, + + IB_USER_CM_CMD_LISTEN, + IB_USER_CM_CMD_ESTABLISH, + + IB_USER_CM_CMD_SEND_REQ, + IB_USER_CM_CMD_SEND_REP, + IB_USER_CM_CMD_SEND_RTU, + IB_USER_CM_CMD_SEND_DREQ, + IB_USER_CM_CMD_SEND_DREP, + IB_USER_CM_CMD_SEND_REJ, + IB_USER_CM_CMD_SEND_MRA, + IB_USER_CM_CMD_SEND_LAP, + IB_USER_CM_CMD_SEND_APR, + IB_USER_CM_CMD_SEND_SIDR_REQ, + IB_USER_CM_CMD_SEND_SIDR_REP, + + IB_USER_CM_CMD_EVENT, +}; +/* + * command ABI structures. 
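+ *
+ * Each request begins with an ib_ucm_cmd_hdr: cmd carries one of the
+ * IB_USER_CM_CMD_* values above, while in and out give the sizes of
+ * the command body and response buffer that follow (this mirrors the
+ * header convention of the user verbs ABI in ib_user_verbs.h).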
+ */ +struct ib_ucm_cmd_hdr { + __u32 cmd; + __u16 in; + __u16 out; +}; + +struct ib_ucm_create_id { + __u64 response; +}; + +struct ib_ucm_create_id_resp { + __u32 id; +}; + +struct ib_ucm_destroy_id { + __u32 id; +}; + +struct ib_ucm_attr_id { + __u64 response; + __u32 id; +}; + +struct ib_ucm_attr_id_resp { + __be64 service_id; + __be64 service_mask; + __be32 local_id; + __be32 remote_id; +}; + +struct ib_ucm_listen { + __be64 service_id; + __be64 service_mask; + __u32 id; +}; + +struct ib_ucm_establish { + __u32 id; +}; + +struct ib_ucm_private_data { + __u64 data; + __u32 id; + __u8 len; + __u8 reserved[3]; +}; + +struct ib_ucm_path_rec { + __u8 dgid[16]; + __u8 sgid[16]; + __be16 dlid; + __be16 slid; + __u32 raw_traffic; + __be32 flow_label; + __u32 reversible; + __u32 mtu; + __be16 pkey; + __u8 hop_limit; + __u8 traffic_class; + __u8 numb_path; + __u8 sl; + __u8 mtu_selector; + __u8 rate_selector; + __u8 rate; + __u8 packet_life_time_selector; + __u8 packet_life_time; + __u8 preference; +}; + +struct ib_ucm_req { + __u32 id; + __u32 qpn; + __u32 qp_type; + __u32 psn; + __be64 sid; + __u64 data; + __u64 primary_path; + __u64 alternate_path; + __u8 len; + __u8 peer_to_peer; + __u8 responder_resources; + __u8 initiator_depth; + __u8 remote_cm_response_timeout; + __u8 flow_control; + __u8 local_cm_response_timeout; + __u8 retry_count; + __u8 rnr_retry_count; + __u8 max_cm_retries; + __u8 srq; + __u8 reserved[1]; +}; + +struct ib_ucm_rep { + __u64 data; + __u32 id; + __u32 qpn; + __u32 psn; + __u8 len; + __u8 responder_resources; + __u8 initiator_depth; + __u8 target_ack_delay; + __u8 failover_accepted; + __u8 flow_control; + __u8 rnr_retry_count; + __u8 srq; +}; + +struct ib_ucm_info { + __u32 id; + __u32 status; + __u64 info; + __u64 data; + __u8 info_len; + __u8 data_len; + __u8 reserved[2]; +}; + +struct ib_ucm_mra { + __u64 data; + __u32 id; + __u8 len; + __u8 timeout; + __u8 reserved[2]; +}; + +struct ib_ucm_lap { + __u64 path; + __u64 data; + __u32 id; + __u8 len; + __u8 reserved[3]; +}; + +struct ib_ucm_sidr_req { + __u32 id; + __u32 timeout; + __be64 sid; + __u64 data; + __u64 path; + __u16 pkey; + __u8 len; + __u8 max_cm_retries; +}; + +struct ib_ucm_sidr_rep { + __u32 id; + __u32 qpn; + __u32 qkey; + __u32 status; + __u64 info; + __u64 data; + __u8 info_len; + __u8 data_len; + __u8 reserved[2]; +}; +/* + * event notification ABI structures. + */ +struct ib_ucm_event_get { + __u64 response; + __u64 data; + __u64 info; + __u8 data_len; + __u8 info_len; + __u8 reserved[2]; +}; + +struct ib_ucm_req_event_resp { + __u32 listen_id; + /* device */ + /* port */ + struct ib_ucm_path_rec primary_path; + struct ib_ucm_path_rec alternate_path; + __be64 remote_ca_guid; + __u32 remote_qkey; + __u32 remote_qpn; + __u32 qp_type; + __u32 starting_psn; + __u8 responder_resources; + __u8 initiator_depth; + __u8 local_cm_response_timeout; + __u8 flow_control; + __u8 remote_cm_response_timeout; + __u8 retry_count; + __u8 rnr_retry_count; + __u8 srq; +}; + +struct ib_ucm_rep_event_resp { + __be64 remote_ca_guid; + __u32 remote_qkey; + __u32 remote_qpn; + __u32 starting_psn; + __u8 responder_resources; + __u8 initiator_depth; + __u8 target_ack_delay; + __u8 failover_accepted; + __u8 flow_control; + __u8 rnr_retry_count; + __u8 srq; + __u8 reserved[1]; +}; + +struct ib_ucm_rej_event_resp { + __u32 reason; + /* ari in ib_ucm_event_get info field. 
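+	   (ari here is the Additional Rejection Information field of
+	   the CM REJ message)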
*/ +}; + +struct ib_ucm_mra_event_resp { + __u8 timeout; + __u8 reserved[3]; +}; + +struct ib_ucm_lap_event_resp { + struct ib_ucm_path_rec path; +}; + +struct ib_ucm_apr_event_resp { + __u32 status; + /* apr info in ib_ucm_event_get info field. */ +}; + +struct ib_ucm_sidr_req_event_resp { + __u32 listen_id; + /* device */ + /* port */ + __u16 pkey; + __u8 reserved[2]; +}; + +struct ib_ucm_sidr_rep_event_resp { + __u32 status; + __u32 qkey; + __u32 qpn; + /* info in ib_ucm_event_get info field. */ +}; + +#define IB_UCM_PRES_DATA 0x01 +#define IB_UCM_PRES_INFO 0x02 +#define IB_UCM_PRES_PRIMARY 0x04 +#define IB_UCM_PRES_ALTERNATE 0x08 + +struct ib_ucm_event_resp { + __u32 id; + __u32 event; + __u32 present; + union { + struct ib_ucm_req_event_resp req_resp; + struct ib_ucm_rep_event_resp rep_resp; + struct ib_ucm_rej_event_resp rej_resp; + struct ib_ucm_mra_event_resp mra_resp; + struct ib_ucm_lap_event_resp lap_resp; + struct ib_ucm_apr_event_resp apr_resp; + + struct ib_ucm_sidr_req_event_resp sidr_req_resp; + struct ib_ucm_sidr_rep_event_resp sidr_rep_resp; + + __u32 send_status; + } u; +}; + +#endif /* IB_USER_CM_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_user_mad.h b/branches/MTHCA/inc/kernel/mthca/ib_user_mad.h new file mode 100644 index 00000000..8ae4f846 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_user_mad.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_user_mad.h 2928 2005-07-28 18:45:56Z sean.hefty $ + */ + +#ifndef IB_USER_MAD_H +#define IB_USER_MAD_H + +/* + * Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define IB_USER_MAD_ABI_VERSION 5 + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). 
+ */ + +/** + * ib_user_mad_hdr - MAD packet header + * @id - ID of agent MAD received with/to be sent with + * @status - 0 on successful receive, ETIMEDOUT if no response + * received (transaction ID in data[] will be set to TID of original + * request) (ignored on send) + * @timeout_ms - Milliseconds to wait for response (unset on receive) + * @retries - Number of automatic retries to attempt + * @qpn - Remote QP number received from/to be sent to + * @qkey - Remote Q_Key to be sent with (unset on receive) + * @lid - Remote lid received from/to be sent to + * @sl - Service level received with/to be sent with + * @path_bits - Local path bits received with/to be sent with + * @grh_present - If set, GRH was received/should be sent + * @gid_index - Local GID index to send with (unset on receive) + * @hop_limit - Hop limit in GRH + * @traffic_class - Traffic class in GRH + * @gid - Remote GID in GRH + * @flow_label - Flow label in GRH + */ +struct ib_user_mad_hdr { + __u32 id; + __u32 status; + __u32 timeout_ms; + __u32 retries; + __u32 length; + __be32 qpn; + __be32 qkey; + __be16 lid; + __u8 sl; + __u8 path_bits; + __u8 grh_present; + __u8 gid_index; + __u8 hop_limit; + __u8 traffic_class; + __u8 gid[16]; + __be32 flow_label; +}; + +/** + * ib_user_mad - MAD packet + * @hdr - MAD packet header + * @data - Contents of MAD + * + */ +struct ib_user_mad { + struct ib_user_mad_hdr hdr; + __u8 data[0]; +}; + +/** + * ib_user_mad_reg_req - MAD registration request + * @id - Set by the kernel; used to identify agent in future requests. + * @qpn - Queue pair number; must be 0 or 1. + * @method_mask - The caller will receive unsolicited MADs for any method + * where @method_mask = 1. + * @mgmt_class - Indicates which management class of MADs should be receive + * by the caller. This field is only required if the user wishes to + * receive unsolicited MADs, otherwise it should be 0. + * @mgmt_class_version - Indicates which version of MADs for the given + * management class to receive. + * @oui: Indicates IEEE OUI when mgmt_class is a vendor class + * in the range from 0x30 to 0x4f. Otherwise not used. + * @rmpp_version: If set, indicates the RMPP version used. + * + */ +struct ib_user_mad_reg_req { + __u32 id; + __u32 method_mask[4]; + __u8 qpn; + __u8 mgmt_class; + __u8 mgmt_class_version; + __u8 oui[3]; + __u8 rmpp_version; +}; + +#define IB_IOCTL_MAGIC 0x1b + +#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \ + struct ib_user_mad_reg_req) + +#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32) + +#endif /* IB_USER_MAD_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_user_verbs.h b/branches/MTHCA/inc/kernel/mthca/ib_user_verbs.h new file mode 100644 index 00000000..d929b899 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_user_verbs.h @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_user_verbs.h 3045 2005-08-10 03:52:23Z roland $ + */ + +#ifndef IB_USER_VERBS_H +#define IB_USER_VERBS_H + +/* + * Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define IB_USER_VERBS_ABI_VERSION 1 + +enum { + IB_USER_VERBS_CMD_QUERY_PARAMS, + IB_USER_VERBS_CMD_GET_CONTEXT, + IB_USER_VERBS_CMD_QUERY_DEVICE, + IB_USER_VERBS_CMD_QUERY_PORT, + IB_USER_VERBS_CMD_QUERY_GID, + IB_USER_VERBS_CMD_QUERY_PKEY, + IB_USER_VERBS_CMD_ALLOC_PD, + IB_USER_VERBS_CMD_DEALLOC_PD, + IB_USER_VERBS_CMD_CREATE_AH, + IB_USER_VERBS_CMD_MODIFY_AH, + IB_USER_VERBS_CMD_QUERY_AH, + IB_USER_VERBS_CMD_DESTROY_AH, + IB_USER_VERBS_CMD_REG_MR, + IB_USER_VERBS_CMD_REG_SMR, + IB_USER_VERBS_CMD_REREG_MR, + IB_USER_VERBS_CMD_QUERY_MR, + IB_USER_VERBS_CMD_DEREG_MR, + IB_USER_VERBS_CMD_ALLOC_MW, + IB_USER_VERBS_CMD_BIND_MW, + IB_USER_VERBS_CMD_DEALLOC_MW, + IB_USER_VERBS_CMD_CREATE_CQ, + IB_USER_VERBS_CMD_RESIZE_CQ, + IB_USER_VERBS_CMD_DESTROY_CQ, + IB_USER_VERBS_CMD_POLL_CQ, + IB_USER_VERBS_CMD_PEEK_CQ, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, + IB_USER_VERBS_CMD_CREATE_QP, + IB_USER_VERBS_CMD_QUERY_QP, + IB_USER_VERBS_CMD_MODIFY_QP, + IB_USER_VERBS_CMD_DESTROY_QP, + IB_USER_VERBS_CMD_POST_SEND, + IB_USER_VERBS_CMD_POST_RECV, + IB_USER_VERBS_CMD_ATTACH_MCAST, + IB_USER_VERBS_CMD_DETACH_MCAST, + IB_USER_VERBS_CMD_CREATE_SRQ, + IB_USER_VERBS_CMD_MODIFY_SRQ, + IB_USER_VERBS_CMD_QUERY_SRQ, + IB_USER_VERBS_CMD_DESTROY_SRQ, + IB_USER_VERBS_CMD_POST_SRQ_RECV +}; + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ + +struct ib_uverbs_async_event_desc { + __u64 element; + __u32 event_type; /* enum ib_event_type */ + __u32 reserved; +}; + +struct ib_uverbs_comp_event_desc { + __u64 cq_handle; +}; + +/* + * All commands from userspace should start with a __u32 command field + * followed by __u16 in_words and out_words fields (which give the + * length of the command block and response buffer if any in 32-bit + * words). 
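+ *
+ * Worked example (sizes as declared in this file, and assuming, as in
+ * the Linux uverbs ABI, that in_words counts the header itself): an
+ * ALLOC_PD request is the 8-byte header plus the 8-byte
+ * ib_uverbs_alloc_pd body, giving in_words = 16 / 4 = 4, and its
+ * 4-byte ib_uverbs_alloc_pd_resp gives out_words = 4 / 4 = 1.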
The kernel driver will read these fields first and read + * the rest of the command struct based on these value. + */ + +struct ib_uverbs_cmd_hdr { + __u32 command; + __u16 in_words; + __u16 out_words; +}; + +/* + * No driver_data for "query params" command, since this is intended + * to be a core function with no possible device dependence. + */ +struct ib_uverbs_query_params { + __u64 response; +}; + +struct ib_uverbs_query_params_resp { + __u32 num_cq_events; +}; + +struct ib_uverbs_get_context { + __u64 response; + __u64 cq_fd_tab; + __u64 driver_data[0]; +}; + +struct ib_uverbs_get_context_resp { + __u32 async_fd; + __u32 reserved; +}; + +struct ib_uverbs_query_device { + __u64 response; + __u64 driver_data[0]; +}; + +struct ib_uverbs_query_device_resp { + __u64 fw_ver; + __be64 node_guid; + __be64 sys_image_guid; + __u64 max_mr_size; + __u64 page_size_cap; + __u32 vendor_id; + __u32 vendor_part_id; + __u32 hw_ver; + __u32 max_qp; + __u32 max_qp_wr; + __u32 device_cap_flags; + __u32 max_sge; + __u32 max_sge_rd; + __u32 max_cq; + __u32 max_cqe; + __u32 max_mr; + __u32 max_pd; + __u32 max_qp_rd_atom; + __u32 max_ee_rd_atom; + __u32 max_res_rd_atom; + __u32 max_qp_init_rd_atom; + __u32 max_ee_init_rd_atom; + __u32 atomic_cap; + __u32 max_ee; + __u32 max_rdd; + __u32 max_mw; + __u32 max_raw_ipv6_qp; + __u32 max_raw_ethy_qp; + __u32 max_mcast_grp; + __u32 max_mcast_qp_attach; + __u32 max_total_mcast_qp_attach; + __u32 max_ah; + __u32 max_fmr; + __u32 max_map_per_fmr; + __u32 max_srq; + __u32 max_srq_wr; + __u32 max_srq_sge; + __u16 max_pkeys; + __u8 local_ca_ack_delay; + __u8 phys_port_cnt; + __u8 reserved[4]; +}; + +struct ib_uverbs_query_port { + __u64 response; + __u8 port_num; + __u8 reserved[7]; + __u64 driver_data[0]; +}; + +struct ib_uverbs_query_port_resp { + __u32 port_cap_flags; + __u32 max_msg_sz; + __u32 bad_pkey_cntr; + __u32 qkey_viol_cntr; + __u32 gid_tbl_len; + __u16 pkey_tbl_len; + __u16 lid; + __u16 sm_lid; + __u8 state; + __u8 max_mtu; + __u8 active_mtu; + __u8 lmc; + __u8 max_vl_num; + __u8 sm_sl; + __u8 subnet_timeout; + __u8 init_type_reply; + __u8 active_width; + __u8 active_speed; + __u8 phys_state; + __u8 reserved[3]; +}; + +struct ib_uverbs_query_gid { + __u64 response; + __u8 port_num; + __u8 index; + __u8 reserved[6]; + __u64 driver_data[0]; +}; + +struct ib_uverbs_query_gid_resp { + __u8 gid[16]; +}; + +struct ib_uverbs_query_pkey { + __u64 response; + __u8 port_num; + __u8 index; + __u8 reserved[6]; + __u64 driver_data[0]; +}; + +struct ib_uverbs_query_pkey_resp { + __u16 pkey; + __u16 reserved; +}; + +struct ib_uverbs_alloc_pd { + __u64 response; + __u64 driver_data[0]; +}; + +struct ib_uverbs_alloc_pd_resp { + __u32 pd_handle; +}; + +struct ib_uverbs_dealloc_pd { + __u32 pd_handle; +}; + +struct ib_uverbs_reg_mr { + __u64 response; + __u64 start; + __u64 length; + __u64 hca_va; + __u32 pd_handle; + __u32 access_flags; + __u64 driver_data[0]; +}; + +struct ib_uverbs_reg_mr_resp { + __u32 mr_handle; + __u32 lkey; + __u32 rkey; +}; + +struct ib_uverbs_dereg_mr { + __u32 mr_handle; +}; + +struct ib_uverbs_create_cq { + __u64 response; + __u64 user_handle; + __u32 cqe; + __u32 event_handler; + __u64 driver_data[0]; +}; + +struct ib_uverbs_create_cq_resp { + __u32 cq_handle; + __u32 cqe; +}; + +struct ib_uverbs_destroy_cq { + __u32 cq_handle; +}; + +struct ib_uverbs_create_qp { + __u64 response; + __u64 user_handle; + __u32 pd_handle; + __u32 send_cq_handle; + __u32 recv_cq_handle; + __u32 srq_handle; + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; 
+ __u32 max_recv_sge; + __u32 max_inline_data; + __u8 sq_sig_all; + __u8 qp_type; + __u8 is_srq; + __u8 reserved; + __u64 driver_data[0]; +}; + +struct ib_uverbs_create_qp_resp { + __u32 qp_handle; + __u32 qpn; +}; + +/* + * This struct needs to remain a multiple of 8 bytes to keep the + * alignment of the modify QP parameters. + */ +struct ib_uverbs_qp_dest { + __u8 dgid[16]; + __u32 flow_label; + __u16 dlid; + __u16 reserved; + __u8 sgid_index; + __u8 hop_limit; + __u8 traffic_class; + __u8 sl; + __u8 src_path_bits; + __u8 static_rate; + __u8 is_global; + __u8 port_num; +}; + +struct ib_uverbs_modify_qp { + struct ib_uverbs_qp_dest dest; + struct ib_uverbs_qp_dest alt_dest; + __u32 qp_handle; + __u32 attr_mask; + __u32 qkey; + __u32 rq_psn; + __u32 sq_psn; + __u32 dest_qp_num; + __u32 qp_access_flags; + __u16 pkey_index; + __u16 alt_pkey_index; + __u8 qp_state; + __u8 cur_qp_state; + __u8 path_mtu; + __u8 path_mig_state; + __u8 en_sqd_async_notify; + __u8 max_rd_atomic; + __u8 max_dest_rd_atomic; + __u8 min_rnr_timer; + __u8 port_num; + __u8 timeout; + __u8 retry_cnt; + __u8 rnr_retry; + __u8 alt_port_num; + __u8 alt_timeout; + __u8 reserved[2]; + __u64 driver_data[0]; +}; + +struct ib_uverbs_modify_qp_resp { +}; + +struct ib_uverbs_destroy_qp { + __u32 qp_handle; +}; + +struct ib_uverbs_attach_mcast { + __u8 gid[16]; + __u32 qp_handle; + __u16 mlid; + __u16 reserved; + __u64 driver_data[0]; +}; + +struct ib_uverbs_detach_mcast { + __u8 gid[16]; + __u32 qp_handle; + __u16 mlid; + __u16 reserved; + __u64 driver_data[0]; +}; + +struct ib_uverbs_create_srq { + __u64 response; + __u64 user_handle; + __u32 pd_handle; + __u32 max_wr; + __u32 max_sge; + __u32 srq_limit; + __u64 driver_data[0]; +}; + +struct ib_uverbs_create_srq_resp { + __u32 srq_handle; +}; + +struct ib_uverbs_modify_srq { + __u32 srq_handle; + __u32 attr_mask; + __u32 max_wr; + __u32 max_sge; + __u32 srq_limit; + __u32 reserved; + __u64 driver_data[0]; +}; + +struct ib_uverbs_destroy_srq { + __u32 srq_handle; +}; + +#endif /* IB_USER_VERBS_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/ib_verbs.h b/branches/MTHCA/inc/kernel/mthca/ib_verbs.h new file mode 100644 index 00000000..2cf521ee --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/ib_verbs.h @@ -0,0 +1,1465 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: ib_verbs.h 2975 2005-08-04 04:59:37Z roland $ + */ + +#if !defined(IB_VERBS_H) +#define IB_VERBS_H + +#include +#include "mt_l2w.h" + +union ib_gid { + u8 raw[16]; + struct { + __be64 subnet_prefix; + __be64 interface_id; + } global; +}; + +enum ib_node_type { + IB_NODE_CA = 1, + IB_NODE_SWITCH, + IB_NODE_ROUTER +}; + +enum ib_device_cap_flags { + IB_DEVICE_RESIZE_MAX_WR = 1, + IB_DEVICE_BAD_PKEY_CNTR = (1<<1), + IB_DEVICE_BAD_QKEY_CNTR = (1<<2), + IB_DEVICE_RAW_MULTI = (1<<3), + IB_DEVICE_AUTO_PATH_MIG = (1<<4), + IB_DEVICE_CHANGE_PHY_PORT = (1<<5), + IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), + IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), + IB_DEVICE_SHUTDOWN_PORT = (1<<8), + IB_DEVICE_INIT_TYPE = (1<<9), + IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), + IB_DEVICE_SYS_IMAGE_GUID = (1<<11), + IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), + IB_DEVICE_SRQ_RESIZE = (1<<13), + IB_DEVICE_N_NOTIFY_CQ = (1<<14), +}; + +#ifdef LINUX_TO_BE_REMOVED +// defined in ib_types.h +enum ib_atomic_cap { + IB_ATOMIC_NONE, + IB_ATOMIC_HCA, + IB_ATOMIC_GLOB +}; +#endif + +struct ib_device_attr { + u64 fw_ver; + __be64 node_guid; + __be64 sys_image_guid; + u64 max_mr_size; + u64 page_size_cap; + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + int max_qp; + int max_qp_wr; + int device_cap_flags; + int max_sge; + int max_sge_rd; + int max_cq; + int max_cqe; + int max_mr; + int max_pd; + int max_qp_rd_atom; + int max_ee_rd_atom; + int max_res_rd_atom; + int max_qp_init_rd_atom; + int max_ee_init_rd_atom; + enum ib_atomic_cap atomic_cap; + int max_ee; + int max_rdd; + int max_mw; + int max_raw_ipv6_qp; + int max_raw_ethy_qp; + int max_mcast_grp; + int max_mcast_qp_attach; + int max_total_mcast_qp_attach; + int max_ah; + int max_fmr; + int max_map_per_fmr; + int max_srq; + int max_srq_wr; + int max_srq_sge; + u16 max_pkeys; + u8 local_ca_ack_delay; +}; + +static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: return 256; + case IB_MTU_512: return 512; + case IB_MTU_1024: return 1024; + case IB_MTU_2048: return 2048; + case IB_MTU_4096: return 4096; + default: return -1; + } +} + +enum ib_port_state { + IB_PORT_NOP = 0, + IB_PORT_DOWN = 1, + IB_PORT_INIT = 2, + IB_PORT_ARMED = 3, + IB_PORT_ACTIVE = 4, + IB_PORT_ACTIVE_DEFER = 5 +}; + +enum ib_port_cap_flags { + IB_PORT_SM = 1 << 1, + IB_PORT_NOTICE_SUP = 1 << 2, + IB_PORT_TRAP_SUP = 1 << 3, + IB_PORT_OPT_IPD_SUP = 1 << 4, + IB_PORT_AUTO_MIGR_SUP = 1 << 5, + IB_PORT_SL_MAP_SUP = 1 << 6, + IB_PORT_MKEY_NVRAM = 1 << 7, + IB_PORT_PKEY_NVRAM = 1 << 8, + IB_PORT_LED_INFO_SUP = 1 << 9, + IB_PORT_SM_DISABLED = 1 << 10, + IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, + IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, + IB_PORT_CM_SUP = 1 << 16, + IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, + IB_PORT_REINIT_SUP = 1 << 18, + IB_PORT_DEVICE_MGMT_SUP = 1 << 19, + 
IB_PORT_VENDOR_CLASS_SUP = 1 << 20, + IB_PORT_DR_NOTICE_SUP = 1 << 21, + IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, + IB_PORT_BOOT_MGMT_SUP = 1 << 23, + IB_PORT_LINK_LATENCY_SUP = 1 << 24, + IB_PORT_CLIENT_REG_SUP = 1 << 25 +}; + +enum ib_port_width { + IB_WIDTH_1X = 1, + IB_WIDTH_4X = 2, + IB_WIDTH_8X = 4, + IB_WIDTH_12X = 8 +}; + +static inline int ib_width_enum_to_int(enum ib_port_width width) +{ + switch (width) { + case IB_WIDTH_1X: return 1; + case IB_WIDTH_4X: return 4; + case IB_WIDTH_8X: return 8; + case IB_WIDTH_12X: return 12; + default: return -1; + } +} + +struct ib_port_attr { + enum ib_port_state state; + enum ib_mtu max_mtu; + enum ib_mtu active_mtu; + int gid_tbl_len; + u32 port_cap_flags; + u32 max_msg_sz; + u32 bad_pkey_cntr; + u32 qkey_viol_cntr; + u16 pkey_tbl_len; + u16 lid; + u16 sm_lid; + u8 lmc; + u8 max_vl_num; + u8 sm_sl; + u8 subnet_timeout; + u8 init_type_reply; + u8 active_width; + u8 active_speed; + u8 phys_state; +}; + +enum ib_device_modify_flags { + IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 +}; + +struct ib_device_modify { + u64 sys_image_guid; +}; + +enum ib_port_modify_flags { + IB_PORT_SHUTDOWN = 1, + IB_PORT_INIT_TYPE = (1<<2), + IB_PORT_RESET_QKEY_CNTR = (1<<3) +}; + +struct ib_port_modify { + u32 set_port_cap_mask; + u32 clr_port_cap_mask; + u8 init_type; +}; + +enum ib_event_type { + IB_EVENT_CQ_ERR = IB_AE_CQ_ERROR, + IB_EVENT_QP_FATAL = IB_AE_QP_FATAL, + IB_EVENT_QP_REQ_ERR = IB_AE_WQ_REQ_ERROR, + IB_EVENT_QP_ACCESS_ERR = IB_AE_WQ_ACCESS_ERROR, + IB_EVENT_COMM_EST = IB_AE_QP_COMM, + IB_EVENT_SQ_DRAINED = IB_AE_SQ_DRAINED, + IB_EVENT_PATH_MIG = IB_AE_QP_APM, + IB_EVENT_PATH_MIG_ERR = IB_AE_QP_APM_ERROR, + IB_EVENT_DEVICE_FATAL = IB_AE_LOCAL_FATAL, + IB_EVENT_PORT_ACTIVE = IB_AE_PORT_ACTIVE, + IB_EVENT_PORT_ERR = IB_AE_PORT_DOWN, + IB_EVENT_LID_CHANGE = IB_AE_UNKNOWN + 1, + IB_EVENT_PKEY_CHANGE, + IB_EVENT_SM_CHANGE, + IB_EVENT_SRQ_ERR, + IB_EVENT_SRQ_LIMIT_REACHED, + IB_EVENT_QP_LAST_WQE_REACHED +}; + +struct ib_event { + struct ib_device *device; + union { + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_srq *srq; + u8 port_num; + } element; + enum ib_event_type event; +}; + +struct ib_event_handler { + struct ib_device *device; + void (*handler)(struct ib_event_handler *, struct ib_event *); + struct list_head list; +}; + +#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ + do { \ + (_ptr)->device = _device; \ + (_ptr)->handler = _handler; \ + INIT_LIST_HEAD(&(_ptr)->list); \ + } while (0) + +struct ib_global_route { + union ib_gid dgid; + u32 flow_label; + u8 sgid_index; + u8 hop_limit; + u8 traffic_class; +}; + +struct ib_grh { + __be32 version_tclass_flow; + __be16 paylen; + u8 next_hdr; + u8 hop_limit; + union ib_gid sgid; + union ib_gid dgid; +}; + +enum { + IB_MULTICAST_QPN = 0xffffff +}; + +#ifdef LINUX_TO_BE_REMOVED +// defined in ib_types.h +#define IB_LID_PERMISSIVE cl_hton16(0xFFFF) +#endif + +enum ib_ah_flags { + IB_AH_GRH = 1 +}; + +struct ib_ah_attr { + struct ib_global_route grh; + u16 dlid; + u8 sl; + u8 src_path_bits; + u8 static_rate; + u8 ah_flags; + u8 port_num; +}; + +enum ib_wc_status { + IB_WC_SUCCESS, + IB_WC_LOC_LEN_ERR, + IB_WC_LOC_QP_OP_ERR, + IB_WC_LOC_EEC_OP_ERR, + IB_WC_LOC_PROT_ERR, + IB_WC_WR_FLUSH_ERR, + IB_WC_MW_BIND_ERR, + IB_WC_BAD_RESP_ERR, + IB_WC_LOC_ACCESS_ERR, + IB_WC_REM_INV_REQ_ERR, + IB_WC_REM_ACCESS_ERR, + IB_WC_REM_OP_ERR, + IB_WC_RETRY_EXC_ERR, + IB_WC_RNR_RETRY_EXC_ERR, + IB_WC_LOC_RDD_VIOL_ERR, + IB_WC_REM_INV_RD_REQ_ERR, + IB_WC_REM_ABORT_ERR, + IB_WC_INV_EECN_ERR, + IB_WC_INV_EEC_STATE_ERR, + 
IB_WC_FATAL_ERR, + IB_WC_RESP_TIMEOUT_ERR, + IB_WC_GENERAL_ERR +}; + +#ifdef LINUX_TO_BE_REMOVED +// defined in ib_types.h +enum ib_wc_opcode { + IB_WC_SEND, + IB_WC_RDMA_WRITE, + IB_WC_RDMA_READ, + IB_WC_COMP_SWAP, + IB_WC_FETCH_ADD, + IB_WC_BIND_MW, +/* + * Set value of IB_WC_RECV so consumers can test if a completion is a + * receive by testing (opcode & IB_WC_RECV). + */ + IB_WC_RECV = 1 << 7, + IB_WC_RECV_RDMA_WITH_IMM +}; +#endif + +enum ib_wc_flags { + IB_WC_GRH = 1, + IB_WC_WITH_IMM = (1<<1) +}; + +struct ib_wc { + u64 wr_id; + enum ib_wc_status status; + enum ib_wc_opcode opcode; + u32 vendor_err; + u32 byte_len; + __be32 imm_data; + u32 qp_num; + u32 src_qp; + int wc_flags; + u16 pkey_index; + u16 slid; + u8 sl; + u8 dlid_path_bits; + u8 port_num; /* valid only for DR SMPs on switches */ +}; + +enum ib_cq_notify { + IB_CQ_SOLICITED, + IB_CQ_NEXT_COMP +}; + +enum ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1 << 0, + IB_SRQ_LIMIT = 1 << 1, +}; + +struct ib_srq_attr { + u32 max_wr; + u32 max_sge; + u32 srq_limit; +}; + +struct ib_srq_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + struct ib_srq_attr attr; +}; + +struct ib_qp_cap { + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; +}; + +enum ib_sig_type { + IB_SIGNAL_ALL_WR, + IB_SIGNAL_REQ_WR +}; + +#ifdef LINUX_TO_BE_REMOVED +// defined in ib_types.h +enum ib_qp_type_t { + /* + * IB_QPT_QP0 and IB_QPT_QP1 have to be the first two entries + * here (and in that order) since the MAD layer uses them as + * indices into a 2-entry table. + */ + IB_QPT_QP0, + IB_QPT_QP1, + + IB_QPT_RELIABLE_CONN, + IB_QPT_UNRELIABLE_CONN, + IB_QPT_UNRELIABLE_DGRM, + IB_QPT_RAW_IPV6, + IB_QPT_RAW_ETY +}; +#endif + +struct ib_qp_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + struct ib_srq *srq; + struct ib_qp_cap cap; + enum ib_sig_type sq_sig_type; + enum ib_qp_type_t qp_type; + u8 port_num; /* special QP types only */ +}; + +enum ib_rnr_timeout { + IB_RNR_TIMER_655_36 = 0, + IB_RNR_TIMER_000_01 = 1, + IB_RNR_TIMER_000_02 = 2, + IB_RNR_TIMER_000_03 = 3, + IB_RNR_TIMER_000_04 = 4, + IB_RNR_TIMER_000_06 = 5, + IB_RNR_TIMER_000_08 = 6, + IB_RNR_TIMER_000_12 = 7, + IB_RNR_TIMER_000_16 = 8, + IB_RNR_TIMER_000_24 = 9, + IB_RNR_TIMER_000_32 = 10, + IB_RNR_TIMER_000_48 = 11, + IB_RNR_TIMER_000_64 = 12, + IB_RNR_TIMER_000_96 = 13, + IB_RNR_TIMER_001_28 = 14, + IB_RNR_TIMER_001_92 = 15, + IB_RNR_TIMER_002_56 = 16, + IB_RNR_TIMER_003_84 = 17, + IB_RNR_TIMER_005_12 = 18, + IB_RNR_TIMER_007_68 = 19, + IB_RNR_TIMER_010_24 = 20, + IB_RNR_TIMER_015_36 = 21, + IB_RNR_TIMER_020_48 = 22, + IB_RNR_TIMER_030_72 = 23, + IB_RNR_TIMER_040_96 = 24, + IB_RNR_TIMER_061_44 = 25, + IB_RNR_TIMER_081_92 = 26, + IB_RNR_TIMER_122_88 = 27, + IB_RNR_TIMER_163_84 = 28, + IB_RNR_TIMER_245_76 = 29, + IB_RNR_TIMER_327_68 = 30, + IB_RNR_TIMER_491_52 = 31 +}; + +enum ib_qp_attr_mask { + IB_QP_STATE = 1, + IB_QP_CUR_STATE = (1<<1), + IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), + IB_QP_ACCESS_FLAGS = (1<<3), + IB_QP_PKEY_INDEX = (1<<4), + IB_QP_PORT = (1<<5), + IB_QP_QKEY = (1<<6), + IB_QP_AV = (1<<7), + IB_QP_PATH_MTU = (1<<8), + IB_QP_TIMEOUT = (1<<9), + IB_QP_RETRY_CNT = (1<<10), + IB_QP_RNR_RETRY = (1<<11), + IB_QP_RQ_PSN = (1<<12), + IB_QP_MAX_QP_RD_ATOMIC = (1<<13), + IB_QP_ALT_PATH = (1<<14), + IB_QP_MIN_RNR_TIMER = (1<<15), + IB_QP_SQ_PSN = (1<<16), + IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), + IB_QP_PATH_MIG_STATE = (1<<18), + 
IB_QP_CAP = (1<<19), + IB_QP_DEST_QPN = (1<<20) +}; + +enum ib_mig_state { + IB_MIG_MIGRATED, + IB_MIG_REARM, + IB_MIG_ARMED +}; + +struct ib_qp_attr { + enum ib_qp_state qp_state; + enum ib_qp_state cur_qp_state; + enum ib_mtu path_mtu; + enum ib_mig_state path_mig_state; + u32 qkey; + u32 rq_psn; + u32 sq_psn; + u32 dest_qp_num; + int qp_access_flags; + struct ib_qp_cap cap; + struct ib_ah_attr ah_attr; + struct ib_ah_attr alt_ah_attr; + u16 pkey_index; + u16 alt_pkey_index; + u8 en_sqd_async_notify; + u8 sq_draining; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 min_rnr_timer; + u8 port_num; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 alt_port_num; + u8 alt_timeout; +}; + +enum ib_wr_opcode { + IB_WR_RDMA_WRITE, + IB_WR_RDMA_WRITE_WITH_IMM, + IB_WR_SEND, + IB_WR_SEND_WITH_IMM, + IB_WR_RDMA_READ, + IB_WR_ATOMIC_CMP_AND_SWP, + IB_WR_ATOMIC_FETCH_AND_ADD +}; + +enum ib_send_flags { + IB_SEND_FENCE = 1, + IB_SEND_SIGNALED = (1<<1), + IB_SEND_SOLICITED = (1<<2), + IB_SEND_INLINE = (1<<3) +}; + +struct ib_sge { + u64 addr; + u32 length; + u32 lkey; +}; + +struct ib_send_wr { + struct ib_send_wr *next; + u64 wr_id; + struct ib_sge *sg_list; + int num_sge; + enum ib_wr_opcode opcode; + int send_flags; + __be32 imm_data; + union { + struct { + u64 remote_addr; + u32 rkey; + } rdma; + struct { + u64 remote_addr; + u64 compare_add; + u64 swap; + u32 rkey; + } atomic; + struct { + struct ib_ah *ah; + struct ib_mad_hdr *mad_hdr; + u32 remote_qpn; + u32 remote_qkey; + int timeout_ms; /* valid for MADs only */ + int retries; /* valid for MADs only */ + u16 pkey_index; /* valid for GSI only */ + u8 port_num; /* valid for DR SMPs on switch only */ + } ud; + } wr; +}; + +struct ib_recv_wr { + struct ib_recv_wr *next; + u64 wr_id; + struct ib_sge *sg_list; + int num_sge; +}; + +enum ib_access_flags { + IB_ACCESS_LOCAL_WRITE = 1, + IB_ACCESS_REMOTE_WRITE = (1<<1), + IB_ACCESS_REMOTE_READ = (1<<2), + IB_ACCESS_REMOTE_ATOMIC = (1<<3), + IB_ACCESS_MW_BIND = (1<<4) +}; + +struct ib_phys_buf { + u64 addr; + u64 size; +}; + +struct ib_mr_attr { + struct ib_pd *pd; + u64 device_virt_addr; + u64 size; + int mr_access_flags; + u32 lkey; + u32 rkey; +}; + +enum ib_mr_rereg_flags { + IB_MR_REREG_TRANS = 1, + IB_MR_REREG_PD = (1<<1), + IB_MR_REREG_ACCESS = (1<<2) +}; + +struct ib_mw_bind { + struct ib_mr *mr; + u64 wr_id; + u64 addr; + u32 length; + int send_flags; + int mw_access_flags; +}; + +struct ib_fmr_attr { + int max_pages; + int max_maps; + u8 page_size; +}; + +struct ib_ucontext { + struct ib_device *device; + struct list_head pd_list; + struct list_head mr_list; + struct list_head mw_list; + struct list_head cq_list; + struct list_head qp_list; + struct list_head srq_list; + struct list_head ah_list; + spinlock_t lock; +}; + +struct ib_uobject { + u64 user_handle; /* handle given to us by userspace */ + struct ib_ucontext *context; /* associated user context */ + struct list_head list; /* link to context's list */ + u32 id; /* index into kernel idr */ +}; + +struct ib_umem { + //NB: type changed from 'unsigned long' + u64 user_base; + //NB: type changed from 'unsigned long' + u64 virt_base; + size_t length; + int offset; + int page_size; + int writable; + struct list_head chunk_list; +}; + +struct ib_umem_chunk { + struct list_head list; + int nents; + int nmap; + struct scatterlist page_list[0]; +}; + +struct ib_udata { + void __user *inbuf; + void __user *outbuf; + size_t inlen; + size_t outlen; +}; + +#define IB_UMEM_MAX_PAGE_CHUNK \ + ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / 
\ + ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ + (void *) &((struct ib_umem_chunk *) 0)->page_list[0])) + +struct ib_umem_object { + struct ib_uobject uobject; + struct ib_umem umem; +}; + +struct ib_pd { + struct ib_device *device; + struct ib_uobject *uobject; + atomic_t usecnt; /* count all resources */ +}; + +struct ib_ah { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; +}; + +typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); + +struct ib_cq { + struct ib_device *device; + struct ib_uobject *uobject; + ib_comp_handler comp_handler; + void (*event_handler)(struct ib_event *, void *); + void * cq_context; + int cqe; + atomic_t usecnt; /* count number of work queues */ +}; + +struct ib_srq { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + atomic_t usecnt; +}; + +struct ib_qp { + struct ib_device *device; + struct ib_pd *pd; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + struct ib_srq *srq; + struct ib_uobject *uobject; + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + u32 qp_num; + enum ib_qp_type_t qp_type; +}; + +struct ib_mr { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + u32 lkey; + u32 rkey; + atomic_t usecnt; /* count number of MWs */ +}; + +struct ib_mw { + struct ib_device *device; + struct ib_pd *pd; + struct ib_uobject *uobject; + u32 rkey; +}; + +struct ib_fmr { + struct ib_device *device; + struct ib_pd *pd; + struct list_head list; + u32 lkey; + u32 rkey; +}; + +struct ib_mad; +struct ib_grh; + +enum ib_process_mad_flags { + IB_MAD_IGNORE_MKEY = 1, + IB_MAD_IGNORE_BKEY = 2, + IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY +}; + +enum ib_mad_result { + IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ + IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ + IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ + IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ +}; + +#define IB_DEVICE_NAME_MAX 64 + +struct ib_cache { + rwlock_t lock; + struct ib_event_handler event_handler; + struct ib_pkey_cache **pkey_cache; + struct ib_gid_cache **gid_cache; +}; + +struct mthca_dev; + +struct ib_device { + struct mthca_dev *mdev; + + char name[IB_DEVICE_NAME_MAX]; + + struct list_head event_handler_list; + spinlock_t event_handler_lock; + + struct list_head core_list; + struct list_head client_data_list; + spinlock_t client_data_lock; + + struct ib_cache cache; + + u32 flags; + + int (*query_device)(struct ib_device *device, + struct ib_device_attr *device_attr); + int (*query_port)(struct ib_device *device, + u8 port_num, + struct ib_port_attr *port_attr); + int (*query_gid)(struct ib_device *device, + u8 port_num, int index, + union ib_gid *gid); + int (*query_pkey)(struct ib_device *device, + u8 port_num, u16 index, u16 *pkey); + int (*modify_device)(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify); + int (*modify_port)(struct ib_device *device, + u8 port_num, int port_modify_mask, + struct ib_port_modify *port_modify); + struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, + struct ib_udata *udata); + int (*dealloc_ucontext)(struct ib_ucontext *context); + int (*mmap)(struct ib_ucontext *context, + struct vm_area_struct *vma); + struct ib_pd * (*alloc_pd)(struct ib_device *device, + struct ib_ucontext *context, + struct 
ib_udata *udata); + int (*dealloc_pd)(struct ib_pd *pd); + struct ib_ah * (*create_ah)(struct ib_pd *pd, + struct ib_ah_attr *ah_attr); + int (*modify_ah)(struct ib_ah *ah, + struct ib_ah_attr *ah_attr); + int (*query_ah)(struct ib_ah *ah, + struct ib_ah_attr *ah_attr); + int (*destroy_ah)(struct ib_ah *ah); + struct ib_srq * (*create_srq)(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); + int (*modify_srq)(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask); + int (*query_srq)(struct ib_srq *srq, + struct ib_srq_attr *srq_attr); + int (*destroy_srq)(struct ib_srq *srq); + int (*post_srq_recv)(struct ib_srq *srq, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr); + struct ib_qp * (*create_qp)(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata); + int (*modify_qp)(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask); + int (*query_qp)(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + int (*destroy_qp)(struct ib_qp *qp); + int (*post_send)(struct ib_qp *qp, + struct ib_send_wr *send_wr, + struct ib_send_wr **bad_send_wr); + int (*post_recv)(struct ib_qp *qp, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr); + struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, + struct ib_ucontext *context, + struct ib_udata *udata); + int (*destroy_cq)(struct ib_cq *cq); + int (*resize_cq)(struct ib_cq *cq, int *cqe); + int (*poll_cq)(struct ib_cq *cq, int num_entries, + struct ib_wc *wc); + int (*peek_cq)(struct ib_cq *cq, int wc_cnt); + int (*req_notify_cq)(struct ib_cq *cq, + enum ib_cq_notify cq_notify); + int (*req_ncomp_notif)(struct ib_cq *cq, + int wc_cnt); + struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, + int mr_access_flags); + struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start); + struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, + struct ib_umem *region, + int mr_access_flags, + struct ib_udata *udata); + int (*query_mr)(struct ib_mr *mr, + struct ib_mr_attr *mr_attr); + int (*dereg_mr)(struct ib_mr *mr); + int (*rereg_phys_mr)(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start); + struct ib_mw * (*alloc_mw)(struct ib_pd *pd); + int (*bind_mw)(struct ib_qp *qp, + struct ib_mw *mw, + struct ib_mw_bind *mw_bind); + int (*dealloc_mw)(struct ib_mw *mw); + struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, + int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + int (*map_phys_fmr)(struct ib_fmr *fmr, + u64 *page_list, int list_len, + u64 iova); + int (*unmap_fmr)(struct list_head *fmr_list); + int (*dealloc_fmr)(struct ib_fmr *fmr); + int (*attach_mcast)(struct ib_qp *qp, + union ib_gid *gid, + u16 lid); + int (*detach_mcast)(struct ib_qp *qp, + union ib_gid *gid, + u16 lid); + int (*process_mad)(struct ib_device *device, + int process_mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad); + +#ifdef LINUX_TO_BE_REMOVED + struct module *owner; + struct class_device class_dev; + struct kobject ports_parent; +#endif + struct list_head port_list; + + u8 node_type; + u8 phys_port_cnt; +}; + +struct ib_client { + char *name; + void (*add) (struct ib_device *); + void (*remove)(struct ib_device *); + + struct list_head 
list; +}; + +struct ib_device *ib_alloc_device(size_t size); +void ib_dealloc_device(struct ib_device *device); + +int ib_register_device (struct ib_device *device); +void ib_unregister_device(struct ib_device *device); + +int ib_register_client (struct ib_client *client); +void ib_unregister_client(struct ib_client *client); + +void *ib_get_client_data(struct ib_device *device, struct ib_client *client); +void ib_set_client_data(struct ib_device *device, struct ib_client *client, + void *data); + +int ib_core_init(void); + +void ib_core_cleanup(void); + +static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) +{ +#ifdef LINUX_TO_BE_CHANGED + return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; +#else + printk( KERN_ERROR " ib_copy_from_udata: copy_from_user not ported \n" ); + return 0; +#endif +} + +static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) +{ +#ifdef LINUX_TO_BE_CHANGED + return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; +#else + printk( KERN_ERROR " ib_copy_to_udata: copy_to_user not ported \n" ); + return 0; +#endif +} + +int ib_register_event_handler (struct ib_event_handler *event_handler); +int ib_unregister_event_handler(struct ib_event_handler *event_handler); +void ib_dispatch_event(struct ib_event *event); + +int ib_query_device(struct ib_device *device, + struct ib_device_attr *device_attr); + +int ib_query_port(struct ib_device *device, + u8 port_num, struct ib_port_attr *port_attr); + +int ib_query_gid(struct ib_device *device, + u8 port_num, int index, union ib_gid *gid); + +int ib_query_pkey(struct ib_device *device, + u8 port_num, u16 index, u16 *pkey); + +int ib_modify_device(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify); + +int ib_modify_port(struct ib_device *device, + u8 port_num, int port_modify_mask, + struct ib_port_modify *port_modify); + +/** + * ib_alloc_pd - Allocates an unused protection domain. + * @device: The device on which to allocate the protection domain. + * + * A protection domain object provides an association between QPs, shared + * receive queues, address handles, memory regions, and memory windows. + */ +struct ib_pd *ib_alloc_pd(struct ib_device *device); + +/** + * ib_dealloc_pd - Deallocates a protection domain. + * @pd: The protection domain to deallocate. + */ +int ib_dealloc_pd(struct ib_pd *pd); + +/** + * ib_create_ah - Creates an address handle for the given address vector. + * @pd: The protection domain associated with the address handle. + * @ah_attr: The attributes of the address vector. + * + * The address handle is used to reference a local or global destination + * in all UD QP post sends. + */ +struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); + +/** + * ib_create_ah_from_wc - Creates an address handle associated with the + * sender of the specified work completion. + * @pd: The protection domain associated with the address handle. + * @wc: Work completion information associated with a received message. + * @grh: References the received global route header. This parameter is + * ignored unless the work completion indicates that the GRH is valid. + * @port_num: The outbound port number to associate with the address. + * + * The address handle is used to reference a local or global destination + * in all UD QP post sends. 
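(An aside added for illustration, not text from the original header: the ib_client hooks declared above are presumably invoked once per device, in the style of the Linux midlayer; all "ex_" names below are invented.)

	static void ex_add_one(struct ib_device *dev);
	static void ex_remove_one(struct ib_device *dev);
	static struct ib_client ex_client = { "example", ex_add_one, ex_remove_one };

	if (ib_register_client( &ex_client ))
		;	// registration failed; otherwise ex_add_one() is expected to
			// run for each device, and ib_set_client_data() can attach
			// per-device state from inside it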
+ */ +struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, + struct ib_grh *grh, u8 port_num); + +/** + * ib_modify_ah - Modifies the address vector associated with an address + * handle. + * @ah: The address handle to modify. + * @ah_attr: The new address vector attributes to associate with the + * address handle. + */ +int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +/** + * ib_query_ah - Queries the address vector associated with an address + * handle. + * @ah: The address handle to query. + * @ah_attr: The address vector attributes associated with the address + * handle. + */ +int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +/** + * ib_destroy_ah - Destroys an address handle. + * @ah: The address handle to destroy. + */ +int ib_destroy_ah(struct ib_ah *ah); + +/** + * ib_create_srq - Creates a SRQ associated with the specified protection + * domain. + * @pd: The protection domain associated with the SRQ. + * @srq_init_attr: A list of initial attributes required to create the SRQ. + * + * srq_attr->max_wr and srq_attr->max_sge are read the determine the + * requested size of the SRQ, and set to the actual values allocated + * on return. If ib_create_srq() succeeds, then max_wr and max_sge + * will always be at least as large as the requested values. + */ +struct ib_srq *ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr); + +/** + * ib_modify_srq - Modifies the attributes for the specified SRQ. + * @srq: The SRQ to modify. + * @srq_attr: On input, specifies the SRQ attributes to modify. On output, + * the current values of selected SRQ attributes are returned. + * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ + * are being modified. + * + * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or + * IB_SRQ_LIMIT to set the SRQ's limit and request notification when + * the number of receives queued drops below the limit. + */ +int ib_modify_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask); + +/** + * ib_query_srq - Returns the attribute list and current values for the + * specified SRQ. + * @srq: The SRQ to query. + * @srq_attr: The attributes of the specified SRQ. + */ +int ib_query_srq(struct ib_srq *srq, + struct ib_srq_attr *srq_attr); + +/** + * ib_destroy_srq - Destroys the specified SRQ. + * @srq: The SRQ to destroy. + */ +int ib_destroy_srq(struct ib_srq *srq); + +/** + * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. + * @srq: The SRQ to post the work request on. + * @recv_wr: A list of work requests to post on the receive queue. + * @bad_recv_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_srq_recv(struct ib_srq *srq, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr) +{ + return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); +} + +/** + * ib_create_qp - Creates a QP associated with the specified protection + * domain. + * @pd: The protection domain associated with the QP. + * @qp_init_attr: A list of initial attributes required to create the QP. + */ +struct ib_qp *ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr); + +/** + * ib_modify_qp - Modifies the attributes for the specified QP and then + * transitions the QP to the given state. + * @qp: The QP to modify. + * @qp_attr: On input, specifies the QP attributes to modify. 
On output, + * the current values of selected QP attributes are returned. + * @qp_attr_mask: A bit-mask used to specify which attributes of the QP + * are being modified. + */ +int ib_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask); + +/** + * ib_query_qp - Returns the attribute list and current values for the + * specified QP. + * @qp: The QP to query. + * @qp_attr: The attributes of the specified QP. + * @qp_attr_mask: A bit-mask used to select specific attributes to query. + * @qp_init_attr: Additional attributes of the selected QP. + * + * The qp_attr_mask may be used to limit the query to gathering only the + * selected attributes. + */ +int ib_query_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + +/** + * ib_destroy_qp - Destroys the specified QP. + * @qp: The QP to destroy. + */ +int ib_destroy_qp(struct ib_qp *qp); + +/** + * ib_post_send - Posts a list of work requests to the send queue of + * the specified QP. + * @qp: The QP to post the work request on. + * @send_wr: A list of work requests to post on the send queue. + * @bad_send_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_send(struct ib_qp *qp, + struct ib_send_wr *send_wr, + struct ib_send_wr **bad_send_wr) +{ + return qp->device->post_send(qp, send_wr, bad_send_wr); +} + +/** + * ib_post_recv - Posts a list of work requests to the receive queue of + * the specified QP. + * @qp: The QP to post the work request on. + * @recv_wr: A list of work requests to post on the receive queue. + * @bad_recv_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_recv(struct ib_qp *qp, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr) +{ + return qp->device->post_recv(qp, recv_wr, bad_recv_wr); +} + +/** + * ib_create_cq - Creates a CQ on the specified device. + * @device: The device on which to create the CQ. + * @comp_handler: A user-specified callback that is invoked when a + * completion event occurs on the CQ. + * @event_handler: A user-specified callback that is invoked when an + * asynchronous event not associated with a completion occurs on the CQ. + * @cq_context: Context associated with the CQ returned to the user via + * the associated completion and event handlers. + * @cqe: The minimum size of the CQ. + * + * Users can examine the cq structure to determine the actual CQ size. + */ +struct ib_cq *ib_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, int cqe); + +/** + * ib_resize_cq - Modifies the capacity of the CQ. + * @cq: The CQ to resize. + * @cqe: The minimum size of the CQ. + * + * Users can examine the cq structure to determine the actual CQ size. + */ +int ib_resize_cq(struct ib_cq *cq, int cqe); + +/** + * ib_destroy_cq - Destroys the specified CQ. + * @cq: The CQ to destroy. + */ +int ib_destroy_cq(struct ib_cq *cq); + +/** + * ib_poll_cq - poll a CQ for completion(s) + * @cq:the CQ being polled + * @num_entries:maximum number of completions to return + * @wc:array of at least @num_entries &struct ib_wc where completions + * will be returned + * + * Poll a CQ for (possibly multiple) completions. If the return value + * is < 0, an error occurred. If the return value is >= 0, it is the + * number of completions returned. 
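(An illustrative aside, hedged and not part of the original source: a one-SGE RDMA write posted through ib_post_send() above could look as follows; qp, mr, local_dma, remote_va, remote_rkey and len are assumptions that must exist in the caller.)

	struct ib_sge sge;
	struct ib_send_wr wr, *bad_wr;
	RtlZeroMemory( &wr, sizeof(wr) );
	sge.addr = local_dma;			// DMA address of the local buffer
	sge.length = len;
	sge.lkey = mr->lkey;			// lkey of the registered region
	wr.opcode = IB_WR_RDMA_WRITE;
	wr.send_flags = IB_SEND_SIGNALED;	// request a completion
	wr.sg_list = &sge;
	wr.num_sge = 1;
	wr.wr.rdma.remote_addr = remote_va;
	wr.wr.rdma.rkey = remote_rkey;
	if (ib_post_send( qp, &wr, &bad_wr ))
		;	// on immediate failure, bad_wr points at the offending request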
If the return value is + * non-negative and < num_entries, then the CQ was emptied. + */ +static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, + struct ib_wc *wc) +{ + return cq->device->poll_cq(cq, num_entries, wc); +} + +/** + * ib_peek_cq - Returns the number of unreaped completions currently + * on the specified CQ. + * @cq: The CQ to peek. + * @wc_cnt: A minimum number of unreaped completions to check for. + * + * If the number of unreaped completions is greater than or equal to wc_cnt, + * this function returns wc_cnt, otherwise, it returns the actual number of + * unreaped completions. + */ +int ib_peek_cq(struct ib_cq *cq, int wc_cnt); + +/** + * ib_req_notify_cq - Request completion notification on a CQ. + * @cq: The CQ to generate an event for. + * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will + * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP, + * notification will occur on the next completion. + */ +static inline int ib_req_notify_cq(struct ib_cq *cq, + enum ib_cq_notify cq_notify) +{ + return cq->device->req_notify_cq(cq, cq_notify); +} + +/** + * ib_req_ncomp_notif - Request completion notification when there are + * at least the specified number of unreaped completions on the CQ. + * @cq: The CQ to generate an event for. + * @wc_cnt: The number of unreaped completions that should be on the + * CQ before an event is generated. + */ +static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) +{ + return cq->device->req_ncomp_notif ? + cq->device->req_ncomp_notif(cq, wc_cnt) : + -ENOSYS; +} + +/** + * ib_get_dma_mr - Returns a memory region for system memory that is + * usable for DMA. + * @pd: The protection domain associated with the memory region. + * @mr_access_flags: Specifies the memory access rights. + */ +struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); + +/** + * ib_reg_phys_mr - Prepares a virtually addressed memory region for use + * by an HCA. + * @pd: The protection domain associated assigned to the registered region. + * @phys_buf_array: Specifies a list of physical buffers to use in the + * memory region. + * @num_phys_buf: Specifies the size of the phys_buf_array. + * @mr_access_flags: Specifies the memory access rights. + * @iova_start: The offset of the region's starting I/O virtual address. + */ +struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start); + +/** + * ib_rereg_phys_mr - Modifies the attributes of an existing memory region. + * Conceptually, this call performs the functions deregister memory region + * followed by register physical memory region. Where possible, + * resources are reused instead of deallocated and reallocated. + * @mr: The memory region to modify. + * @mr_rereg_mask: A bit-mask used to indicate which of the following + * properties of the memory region are being modified. + * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies + * the new protection domain to associated with the memory region, + * otherwise, this parameter is ignored. + * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this + * field specifies a list of physical buffers to use in the new + * translation, otherwise, this parameter is ignored. + * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this + * field specifies the size of the phys_buf_array, otherwise, this + * parameter is ignored. 
+ * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this + * field specifies the new memory access rights, otherwise, this + * parameter is ignored. + * @iova_start: The offset of the region's starting I/O virtual address. + */ +int ib_rereg_phys_mr(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start); + +/** + * ib_query_mr - Retrieves information about a specific memory region. + * @mr: The memory region to retrieve information about. + * @mr_attr: The attributes of the specified memory region. + */ +int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); + +/** + * ib_dereg_mr - Deregisters a memory region and removes it from the + * HCA translation table. + * @mr: The memory region to deregister. + */ +int ib_dereg_mr(struct ib_mr *mr); + +/** + * ib_alloc_mw - Allocates a memory window. + * @pd: The protection domain associated with the memory window. + */ +struct ib_mw *ib_alloc_mw(struct ib_pd *pd); + +/** + * ib_bind_mw - Posts a work request to the send queue of the specified + * QP, which binds the memory window to the given address range and + * remote access attributes. + * @qp: QP to post the bind work request on. + * @mw: The memory window to bind. + * @mw_bind: Specifies information about the memory window, including + * its address range, remote access rights, and associated memory region. + */ +static inline int ib_bind_mw(struct ib_qp *qp, + struct ib_mw *mw, + struct ib_mw_bind *mw_bind) +{ + /* XXX reference counting in corresponding MR? */ + return mw->device->bind_mw ? + mw->device->bind_mw(qp, mw, mw_bind) : + -ENOSYS; +} + +/** + * ib_dealloc_mw - Deallocates a memory window. + * @mw: The memory window to deallocate. + */ +int ib_dealloc_mw(struct ib_mw *mw); + +/** + * ib_alloc_fmr - Allocates a unmapped fast memory region. + * @pd: The protection domain associated with the unmapped region. + * @mr_access_flags: Specifies the memory access rights. + * @fmr_attr: Attributes of the unmapped region. + * + * A fast memory region must be mapped before it can be used as part of + * a work request. + */ +struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, + int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + +/** + * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. + * @fmr: The fast memory region to associate with the pages. + * @page_list: An array of physical pages to map to the fast memory region. + * @list_len: The number of pages in page_list. + * @iova: The I/O virtual address to use with the mapped region. + */ +static inline int ib_map_phys_fmr(struct ib_fmr *fmr, + u64 *page_list, int list_len, + u64 iova) +{ + return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); +} + +/** + * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. + * @fmr_list: A linked list of fast memory regions to unmap. + */ +int ib_unmap_fmr(struct list_head *fmr_list); + +/** + * ib_dealloc_fmr - Deallocates a fast memory region. + * @fmr: The fast memory region to deallocate. + */ +int ib_dealloc_fmr(struct ib_fmr *fmr); + +/** + * ib_attach_mcast - Attaches the specified QP to a multicast group. + * @qp: QP to attach to the multicast group. The QP must be type + * IB_QPT_UNRELIABLE_DGRM. + * @gid: Multicast group GID. + * @lid: Multicast group LID in host byte order. 
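(Hedged usage sketch, not part of the original comment: mgid and mlid are assumptions, normally obtained from the subnet administrator, and qp must be an IB_QPT_UNRELIABLE_DGRM QP.)

	union ib_gid mgid;
	u16 mlid;
	// ... fill mgid/mlid from the SA join response ...
	if (ib_attach_mcast( qp, &mgid, mlid ))
		;	// join failed; no multicast traffic will be delivered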
+ * + * In order to send and receive multicast packets, subnet + * administration must have created the multicast group and configured + * the fabric appropriately. The port associated with the specified + * QP must also be a member of the multicast group. + */ +int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); + +/** + * ib_detach_mcast - Detaches the specified QP from a multicast group. + * @qp: QP to detach from the multicast group. + * @gid: Multicast group GID. + * @lid: Multicast group LID in host byte order. + */ +int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); + +#endif /* IB_VERBS_H */ diff --git a/branches/MTHCA/inc/kernel/mthca/mt_atomic.h b/branches/MTHCA/inc/kernel/mthca/mt_atomic.h new file mode 100644 index 00000000..4dcf5f30 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_atomic.h @@ -0,0 +1,58 @@ +#ifndef MT_ATOMIC_H +#define MT_ATOMIC_H + +// atomic +typedef LONG atomic_t; + +static inline void atomic_inc(atomic_t *pval) +{ + InterlockedIncrement(pval); +} + +static inline void atomic_dec(atomic_t *pval) +{ + InterlockedDecrement(pval); +} + +static inline atomic_t atomic_read(atomic_t *pval) +{ + return (atomic_t)InterlockedOr (pval,0); +} + +static inline void atomic_set(atomic_t *pval, long val) +{ + InterlockedExchange(pval, val); +} + +/** +* atomic_inc_and_test - increment and test +* pval: pointer of type atomic_t +* +* Atomically increments pval by 1 and +* returns true if the result is 0, or false for all other +* cases. +*/ +static inline int +atomic_inc_and_test(atomic_t *pval) +{ + LONG val = InterlockedIncrement(pval); + return (val == 0); +} + +/** +* atomic_dec_and_test - decrement and test +* pval: pointer of type atomic_t +* +* Atomically decrements pval by 1 and +* returns true if the result is 0, or false for all other +* cases. +*/ +static inline int +atomic_dec_and_test(atomic_t *pval) +{ + LONG val = InterlockedDecrement(pval); + return (val == 0); +} + + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_bitmap.h b/branches/MTHCA/inc/kernel/mthca/mt_bitmap.h new file mode 100644 index 00000000..0468285e --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_bitmap.h @@ -0,0 +1,272 @@ +#ifndef MT_BITMAP_H +#define MT_BITMAP_H + +#ifdef WIN_TO_BE_REMOVED +unsigned long ul_mask[32] = { + 0x00000001, 0x00000002, 0x00000004, 0x00000008, + 0x00000010, 0x00000020, 0x00000040, 0x00000080, + 0x00000100, 0x00000200, 0x00000400, 0x00000800, + 0x00001000, 0x00002000, 0x00004000, 0x00008000, + 0x00010000, 0x00020000, 0x00040000, 0x00080000, + 0x00100000, 0x00200000, 0x00400000, 0x00800000, + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000 }; +#endif + +// DECLARE_BITMAP +#define BITS_PER_LONG 32 +#define BITS_TO_LONGS(bits) \ + (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +/** +* atomic_clear_bit - Atomically clear a bit in memory +* @nr: the bit to clear +* @addr: the address to start counting from +* +* This function is atomic and may not be reordered. However, it does +* not contain a memory barrier, so if it is used for locking purposes, +* the caller must ensure visibility on other processors by other means. +*/ +static inline unsigned long atomic_clear_bit(int nr, volatile unsigned long * addr) +{ + return InterlockedAnd( addr, ~(1 << nr) ); +} + +/** +* atomic_set_bit - Atomically set a bit in memory +* @nr: the bit to set +* @addr: the address to start counting from +* +* This function is atomic and may not be reordered. +* +* Note: there are no guarantees that this function will not be reordered +* on non x86 architectures, so if you are writing portable code, +* make sure not to rely on its reordering guarantees. +* +* Note that @nr may be almost arbitrarily large; this function is not +* restricted to acting on a single-word quantity. +*/ +static inline unsigned long atomic_set_bit(int nr, volatile unsigned long * addr) +{ + return InterlockedOr( addr, (1 << nr) ); +} + +static inline int set_bit(int nr,long * addr) +{ + addr += nr >> 5; + return atomic_set_bit( nr & 0x1f, addr ); +} + +static inline int clear_bit(int nr, long * addr) +{ + addr += nr >> 5; + return atomic_clear_bit( nr & 0x1f, addr ); +} + +static inline int test_bit(int nr, const unsigned long * addr) +{ + int mask; + + addr += nr >> 5; + mask = 1 << (nr & 0x1f); + return ((mask & *addr) != 0); +} + + +/** +* bitmap_zero - clear the bitmap +* @dst: the bitmap address +* @nbits: the bitmap size in bits +* +*/ +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + RtlZeroMemory(dst, len); + } +} + +#define BITMAP_LAST_WORD_MASK(nbits) \ + ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL ) + +int __bitmap_full(const unsigned long *bitmap, int bits); + +static inline int bitmap_full(const unsigned long *src, int nbits) +{ + if (nbits <= BITS_PER_LONG) + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_full(src, nbits); +} + +int __bitmap_empty(const unsigned long *bitmap, int bits); + +static inline int bitmap_empty(const unsigned long *src, int nbits) +{ + if (nbits <= BITS_PER_LONG) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_empty(src, nbits); +} + +/* +* fls: find last bit set.
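For illustration (worked values, added editorially): fls(0) == 0, fls(1) == 1, fls(5) == 3 (bit 2 is the highest bit set), and fls(0x80000000) == 32. The roundup_pow_of_two() helper in mt_memory.h later in this patch depends on exactly this N+1 convention, since it computes 1UL << fls(x - 1); e.g. roundup_pow_of_two(5) is 1 << fls(4) == 8.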
+* returns: 0 - if not found or N+1, if found Nth bit +*/ + +static inline int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + + +/** +* _ffs - find the first one bit in a word +* @addr: The address to start the search at +* @offset: The bitnumber to start searching at +* +* returns: 0 - if not found or N+1, if found Nth bit +*/ +static inline int _ffs(const unsigned long *addr, int offset) +{ + //TODO: not efficient code - better done in assembler + int mask = 1 << offset; + int rbc = BITS_PER_LONG - offset; + int ix; + for (ix = 0; ix < rbc; ix++, mask <<= 1) { + if (*addr & mask) + return offset + ix + 1; + } + return 0; +} + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_l2w.h b/branches/MTHCA/inc/kernel/mthca/mt_l2w.h new file mode 100644 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_l2w.h +#ifndef MT_L2W_H +#define MT_L2W_H + +// standard includes +#include +#include +#include +#include + +// ours - the order is important +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// =========================================== +// SUBSTITUTIONS +// =========================================== + +#define BUG_ON(exp) do { ASSERT(!(exp)); /* in Linux follows here panic() !*/ } while(0) +#define WARN_ON(exp) do { ASSERT(!(exp)); /* in Linux follows here panic() !*/ } while(0) +#define snprintf _snprintf +#define printk DbgPrint +#define KERN_WARNING "warn: " +#define KERN_DEBUG "debg: " +#define KERN_INFO "info: " +#define KERN_ERROR "error: " +#if DBG +#define pr_debug printk +#else +#define pr_debug +#endif + +// memory barriers +#define wmb KeMemoryBarrier +#define rmb KeMemoryBarrier +#define mb KeMemoryBarrier + +// =========================================== +// LITERALS +// =========================================== + + + + +// =========================================== +// TYPES +// =========================================== + +// rw_lock +typedef spinlock_t rwlock_t; + +// dummy function +typedef void (*MT_EMPTY_FUNC)(); + +// =========================================== +// MACROS +// =========================================== + +// nullifying macros +#define MODULE_AUTHOR(a) +#define MODULE_DESCRIPTION(a) +#define MODULE_LICENSE(a) +#define MODULE_VERSION(a) +#define MODULE_PARM(p,a) +#define might_sleep() do {} while(0) + +#ifdef WIN_TO_BE_REMOVED +// the MS compiler doesn't understand GCC statement expressions: +// min_t/max_t + #define min_t(type,x,y) \ + ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; }) +#define max_t(type,x,y) \ + ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; }) +#endif + +// ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +// ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +// +// debug print +// + +// macro to print messages (so far - without checking printing level) +#define __MTHCA_PRINT(sev,dev) \ + char buf[512]; \ + const char *sevs[] = { "err", "warn", "info", "dbg" }; \ + va_list ap; \ + va_start (ap, format); \ + UNREFERENCED_PARAMETER(dev); \ + _vsnprintf (buf, sizeof(buf), format, ap); \ + va_end (ap); \ + DbgPrint( "hcavp (%s): %s", sevs[sev], buf) + +// severities +#define MTHCA_SEV_ERR 0 +#define MTHCA_SEV_WARN 1 +#define MTHCA_SEV_INFO 2 +#define MTHCA_SEV_DBG 3 + +// printing functions +struct mthca_dev; +static inline void __mthca_err(struct mthca_dev *dev, const char *format, ...) { __MTHCA_PRINT(MTHCA_SEV_ERR, dev); } +static inline void __mthca_warn(struct mthca_dev *dev, const char *format, ...)
{ __MTHCA_PRINT(MTHCA_SEV_WARN, dev); } +static inline void __mthca_info(struct mthca_dev *dev, const char *format, ...) { __MTHCA_PRINT(MTHCA_SEV_INFO, dev); } +static inline void __mthca_dbg(struct mthca_dev *dev, const char *format, ...) { __MTHCA_PRINT(MTHCA_SEV_DBG, dev); } + +// macros to eliminate the debug printing in the release version +#define _mthca_err __mthca_err +#define _mthca_warn __mthca_warn +#define _mthca_info __mthca_info +#if DBG +#define _mthca_dbg __mthca_dbg +#else +#define _mthca_dbg +#endif + +SIZE_T strlcpy(char *dest, const char *src, SIZE_T size); +void MT_time_calibrate(); + +#define ERR_PTR(error) ((void*)(LONG_PTR)(error)) +#define PTR_ERR(ptr) ((long)(LONG_PTR)(void*)(ptr)) +//TODO: there are 2 assumptions here: +// - pointer can't be too big (around -1) +// - error can't be bigger than 1000 +#define IS_ERR(ptr) ((ULONG_PTR)(ptr) > (ULONG_PTR)-1000L) + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_list.h b/branches/MTHCA/inc/kernel/mthca/mt_list.h new file mode 100644 index 00000000..35853f3c --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_list.h @@ -0,0 +1,169 @@ +#ifndef MT_LIST_H +#define MT_LIST_H + +// taken from list.h + +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +/* +* Simple doubly linked list implementation. +* +* Some of the internal functions ("__xxx") are useful when +* manipulating whole lists rather than single entries, as +* sometimes we already know the next/prev entries and we can +* generate better code by using them directly rather than +* using the generic single-entry routines. +*/ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +#define INIT_LIST_HEAD(ptr) do { \ + (ptr)->next = (ptr); (ptr)->prev = (ptr); \ +} while (0) + + +/* +* Insert a new entry between two known consecutive entries. +* +* This is only for internal list manipulation where we know +* the prev/next entries already! +*/ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** +* list_add - add a new entry +* @new: new entry to be added +* @head: list head to add it after +* +* Insert a new entry after the specified head. +* This is good for implementing stacks. +*/ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + +/** +* list_add_tail - add a new entry +* @new: new entry to be added +* @head: list head to add it before +* +* Insert a new entry before the specified head. +* This is useful for implementing queues. +*/ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + + /* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ + static inline void __list_del(struct list_head * prev, struct list_head * next) + { + next->prev = prev; + prev->next = next; + } + + /** + * list_del - deletes entry from list. + * @entry: the element to delete from the list.
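(An illustrative aside, not from the original source: because the iteration macros defined just below take an explicit type argument instead of the unportable typeof(), a typical traversal looks like this; struct item is an invented example type.)

	struct item { int val; struct list_head link; };
	LIST_HEAD( queue );
	struct item *pos, *tmp;
	// ... items added with list_add_tail( &it->link, &queue ) ...
	list_for_each_entry( pos, &queue, link, struct item )
		printk( KERN_INFO "%d\n", pos->val );
	// removal while iterating needs the _safe variant:
	list_for_each_entry_safe( pos, tmp, &queue, link, struct item, struct item )
		list_del( &pos->link );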
+ * Note: list_empty on entry does not return true after this, the entry is + * in an undefined state. + */ + static inline void list_del(struct list_head *entry) + { + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; + } + +/** +* list_empty - tests whether a list is empty +* @head: the list to test. +*/ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + + /** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry - iterate over list of given type +* @pos: the type * to use as a loop counter. +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +*/ +#define list_for_each_entry(pos, head, member,type) \ + for (pos = list_entry((head)->next, type, member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, type, member)) + + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry_reverse - iterate backwards over list of given type. +* @pos: the type * to use as a loop counter. +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +*/ +#define list_for_each_entry_reverse(pos, head, member,type) \ + for (pos = list_entry((head)->prev, type, member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, type, member)) + + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry +* @pos: the type * to use as a loop counter. +* @n: another type * to use as temporary storage +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +* @type_n: typeof(*n) +*/ +#define list_for_each_entry_safe(pos, n, head, member,type,type_n) \ + for (pos = list_entry((head)->next, type, member), \ + n = list_entry(pos->member.next, type, member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, type_n, member)) + + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_memory.h b/branches/MTHCA/inc/kernel/mthca/mt_memory.h new file mode 100644 index 00000000..1e88afe1 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_memory.h @@ -0,0 +1,275 @@ +#ifndef MT_MEMORY_H +#define MT_MEMORY_H + +// =========================================== +// CONSTANTS +// =========================================== + +#define MT_TAG_ATOMIC 'MOTA' +#define MT_TAG_KERNEL 'LNRK' +#define MT_TAG_HIGH 'HGIH' +#define MT_TAG_PCIPOOL 'PICP' +#define MT_TAG_IOMAP 'PAMI' + +// =========================================== +// SUBSTITUTIONS +// =========================================== + +#define memcpy_toio memcpy + +// =========================================== +// MACROS +// =========================================== + +#define PAGE_MASK (~(PAGE_SIZE-1)) + +// =========================================== +// SYSTEM MEMORY +// =========================================== + +// memory +#define __GFP_NOWARN 0 /* Suppress page allocation failure warning */ +#define __GFP_HIGHMEM 0 + +#define GFP_ATOMIC 1 /* can't wait (i.e. 
DPC or higher) */ +#define GFP_KERNEL 2 /* can wait (npaged) */ +#define GFP_HIGHUSER 4 /* GFP_KERNEL, that can be in HIGH memory */ + + +#define SLAB_ATOMIC GFP_ATOMIC +#define SLAB_KERNEL GFP_KERNEL + +#if 0 +//TODO: temporary - for finding a bug +static inline void * kmalloc( SIZE_T bsize, unsigned int gfp_mask) +{ + void *ptr; + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + switch (gfp_mask) { + case GFP_ATOMIC: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_ATOMIC ); + break; + case GFP_KERNEL: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL ); + break; + case GFP_HIGHUSER: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_HIGH ); + break; + default: + DbgPrint("kmalloc: unsupported flag %d\n", gfp_mask); + ptr = NULL; + break; + } + return ptr; +} +#else +#define kmalloc(bsize,flags) ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL ) +#endif + +static inline void kfree (const void *pobj) +{ + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + if (pobj) + ExFreePool((void *)pobj); +} + +static inline void * get_zeroed_page(unsigned int gfp_mask) +{ + void *ptr = kmalloc(PAGE_SIZE, gfp_mask); + if (ptr != NULL) + RtlZeroMemory( ptr, PAGE_SIZE); + return ptr; +} + +#define free_page(ptr) kfree(ptr) + + +// =========================================== +// IO SPACE <==> SYSTEM MEMORY +// =========================================== + + +/** +* ioremap - map bus memory into CPU space +* @offset: bus address of the memory +* @size: size of the resource to map +* +* ioremap performs a platform specific sequence of operations to +* make bus memory CPU accessible via the readb/readw/readl/writeb/ +* writew/writel functions and the other mmio helpers. The returned +* address is not guaranteed to be usable directly as a virtual +* address. +*/ +static inline void *ioremap(io_addr_t addr, SIZE_T size, SIZE_T* psize) +{ + PHYSICAL_ADDRESS pa; + void *va; + + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + pa.QuadPart = addr; + va = MmMapIoSpace( pa, size, MmNonCached ); + *psize = size; + return va; +} + +static inline void iounmap(void *va, SIZE_T size) +{ + MmUnmapIoSpace( va, size); +} + + // =========================================== + // DMA SUPPORT + // =========================================== + +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define DMA_TO_DEVICE PCI_DMA_TODEVICE + + struct scatterlist { + void * page; /* kernel virtual address */ + unsigned int offset; /* offset in the first page */ + unsigned int length; /* buffer length */ + dma_addr_t dma_address; /* logical (device) address */ + PMDL p_mdl; /* MDL, if any (used for user space buffers) */ + }; + + #define sg_dma_address(sg) ((sg)->dma_address) + #define sg_dma_len(sg) ((sg)->length) + +void * alloc_pages( + IN struct mthca_dev *dev, + IN unsigned long cur_order, + OUT dma_addr_t *p_da); + +void free_pages( + IN struct mthca_dev *dev, + IN unsigned long cur_order, + IN void *va, + IN dma_addr_t da); + + void* alloc_dma_mem( + IN struct mthca_dev *dev, + IN unsigned long size, + OUT dma_addr_t *p_da); + + void free_dma_mem( + IN struct mthca_dev *dev, + IN unsigned long size, + IN void *va, + IN dma_addr_t da); + +#define alloc_page(dev,p_da) alloc_pages(dev,0,p_da) + +#define dma_alloc_coherent(dev,length,p_da,flags) \ + alloc_pages( dev, get_order(length), p_da ) + +#define dma_free_coherent(dev,length,va,da) \ + free_pages( dev, get_order(length), va, da ) + +//TODO: don't see diff between this and previous one. 
Is it right? +#define pci_free_consistent(dev,length,va,da) \ + free_pages( dev, get_order(length), va, da ) + + static inline int pci_map_sg(struct mthca_dev *dev, + struct scatterlist *sg, int nents, int direction) + { + UNREFERENCED_PARAMETER(dev); + UNREFERENCED_PARAMETER(sg); + UNREFERENCED_PARAMETER(nents); + UNREFERENCED_PARAMETER(direction); + /* suppose, that pages were always translated to DMA space */ + return nents; /* i.e., we mapped all the entries */ + } + + static inline int pci_unmap_sg(struct mthca_dev *dev, + struct scatterlist *sg, int nents, int direction) + { + UNREFERENCED_PARAMETER(dev); + UNREFERENCED_PARAMETER(sg); + UNREFERENCED_PARAMETER(nents); + UNREFERENCED_PARAMETER(direction); + /* suppose, that pages were always translated to DMA space */ + return nents; /* i.e., we unmapped all the entries */ + } + + static inline dma_addr_t pci_map_page(struct mthca_dev *dev, + void *va, unsigned long offset, SIZE_T size, int direction) + { + UNREFERENCED_PARAMETER(dev); + UNREFERENCED_PARAMETER(va); + UNREFERENCED_PARAMETER(offset); + UNREFERENCED_PARAMETER(size); + UNREFERENCED_PARAMETER(direction); + /* suppose, that pages were always translated to DMA space */ + return 0; /* i.e., no extra per-page mapping is performed */ + } + + // =========================================== + // HELPERS + // =========================================== + + static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline int long_log2(unsigned long x) +{ + int r = 0; + for (x >>= 1; x > 0; x >>= 1) + r++; + return r; +} + +static inline unsigned long roundup_pow_of_two(unsigned long x) +{ + return (1UL << fls(x - 1)); +} + +// =========================================== +// PROTOTYPES +// =========================================== + +void put_page(struct scatterlist *sg); +int get_user_pages( + IN struct mthca_dev *dev, /* device */ + IN u64 start, /* address in user space */ + IN int npages, /* size in pages */ + IN int write_access, /* access rights */ + OUT void **pages, /* mapped kernel address */ + OUT PMDL *p_mdl /* MDL */ + ); + +typedef struct _mt_iobuf { + u64 va; /* virtual address of the buffer */ + u64 size; /* size in bytes of the buffer */ + LIST_ENTRY seg_que; + u32 nr_pages; + int is_user; + int seg_num; +} mt_iobuf_t; + + +void iobuf_deregister(mt_iobuf_t *iobuf_p); +int iobuf_register( + IN u64 va, + IN u64 size, + IN int is_user, + IN int acc, + IN OUT mt_iobuf_t *iobuf_p); + + +unsigned long copy_from_user(void *to, const void __user *from, unsigned long n); +unsigned long copy_to_user(void __user *to, const void *from, unsigned long n); + + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_pci.h b/branches/MTHCA/inc/kernel/mthca/mt_pci.h new file mode 100644 index 00000000..83947ef7 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_pci.h @@ -0,0 +1,115 @@ +#ifndef MT_PCI_H +#define MT_PCI_H + +// =========================================== +// LITERALS +// =========================================== + +#ifndef PCI_VENDOR_ID_MELLANOX +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD +#define
PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#endif + +#ifndef PCI_VENDOR_ID_TOPSPIN +#define PCI_VENDOR_ID_TOPSPIN 0x1867 +#endif + + +// =========================================== +// TYPES +// =========================================== + + +// =========================================== +// MACROS/FUNCTIONS +// =========================================== + +// get bar boundaries +#if 1 +#define pci_resource_start(dev,bar_num) ((dev)->ext->bar[bar_num].phys) +#define pci_resource_len(dev,bar_num) ((dev)->ext->bar[bar_num].size) +#else +static inline uint64_t pci_resource_start(struct mthca_dev *dev, int bar_num) +{ + return dev->ext->bar[bar_num].phys; +} +#endif + + +// i/o to registers + +static inline u64 readq(const volatile void __iomem *addr) +{ + //TODO: write atomic implementation of _IO_READ_QWORD and change mthca_doorbell.h + u64 val; + READ_REGISTER_BUFFER_ULONG((PULONG)(addr), (PULONG)&val, 2 ); + return val; +} + +static inline u32 readl(const volatile void __iomem *addr) +{ + return READ_REGISTER_ULONG((PULONG)(addr)); +} + +static inline u16 reads(const volatile void __iomem *addr) +{ + return READ_REGISTER_USHORT((PUSHORT)(addr)); +} + +static inline u8 readb(const volatile void __iomem *addr) +{ + return READ_REGISTER_UCHAR((PUCHAR)(addr)); +} + +#define __raw_readq readq +#define __raw_readl readl +#define __raw_reads reads +#define __raw_readb readb + +static inline void writeq(unsigned __int64 val, volatile void __iomem *addr) +{ + //TODO: write atomic implementation of _IO_WRITE_QWORD and change mthca_doorbell.h + WRITE_REGISTER_BUFFER_ULONG( (PULONG)(addr), (PULONG)&val, 2 ); +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + WRITE_REGISTER_ULONG((PULONG)(addr),val); +} + +static inline void writes(unsigned short val, volatile void __iomem *addr) +{ + WRITE_REGISTER_USHORT((PUSHORT)(addr),val); +} + +static inline void writeb(unsigned char val, volatile void __iomem *addr) +{ + WRITE_REGISTER_UCHAR((PUCHAR)(addr),val); +} + +#define __raw_writeq writeq +#define __raw_writel writel +#define __raw_writes writes +#define __raw_writeb writeb + +#endif + diff --git a/branches/MTHCA/inc/kernel/mthca/mt_pcipool.h b/branches/MTHCA/inc/kernel/mthca/mt_pcipool.h new file mode 100644 index 00000000..1270293d --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_pcipool.h @@ -0,0 +1,110 @@ +#ifndef MT_PCIPOOL_H +#define MT_PCIPOOL_H + +typedef struct pci_pool { +#ifdef WIN_TO_BE_REMOVED + struct list_head page_list; + spinlock_t lock; + size_t blocks_per_page; + size_t allocation; + wait_queue_head_t waitq; + struct list_head pools; +#endif + size_t size; + struct mthca_dev *mdev; + char name [32]; + NPAGED_LOOKASIDE_LIST pool_hdr; +} pci_pool_t; + +// taken from dmapool.c + +/** +* pci_pool_create - Creates a pool of consistent memory blocks, for dma. +* @name: name of pool, for diagnostics +* @mdev: device that will be doing the DMA +* @size: size of the blocks in this pool. +* @align: alignment requirement for blocks; must be a power of two +* @allocation: returned blocks won't cross this boundary (or zero) +* Context: !in_interrupt() +* +* Returns a dma allocation pool with the requested characteristics, or +* null if one can't be created. Given one of these pools, dma_pool_alloc() +* may be used to allocate memory. 
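(Hedged lifecycle sketch, an editorial aside rather than original comment text; mdev and the 64-byte block size are assumptions:)

	dma_addr_t dma;
	void *buf;
	pci_pool_t *pool = pci_pool_create( "example", mdev, 64, 64, 0 );
	if (pool) {
		buf = pci_pool_alloc( pool, GFP_KERNEL, &dma );
		// note: in this port, mem_flags is accepted for source
		// compatibility but the lookaside-list allocator ignores it
		// ... use buf/dma ...
		pci_pool_free( pool, buf, dma );
		pci_pool_destroy( pool );
	}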
Such memory will all have "consistent" +* DMA mappings, accessible by the device and its driver without using +* cache flushing primitives. The actual size of blocks allocated may be +* larger than requested because of alignment. +* +* If allocation is nonzero, objects returned from dma_pool_alloc() won't + * cross that size boundary. This is useful for devices which have + * addressing restrictions on individual DMA transfers, such as not crossing + * boundaries of 4KBytes. + */ + +pci_pool_t * +pci_pool_create (const char *name, struct mthca_dev *mdev, + size_t size, size_t align, size_t allocation); + +/** + * dma_pool_alloc - get a block of consistent memory + * @pool: dma pool that will produce the block + * @mem_flags: GFP_* bitmask + * @handle: pointer to dma address of block + * + * This returns the kernel virtual address of a currently unused block, + * and reports its dma address through the handle. + * If such a memory block can't be allocated, null is returned. + */ +static inline void * +pci_pool_alloc (pci_pool_t *pool, int mem_flags, dma_addr_t *handle) +{ + PHYSICAL_ADDRESS pa; + void * ptr; + + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + ptr = ExAllocateFromNPagedLookasideList( &pool->pool_hdr ); + if (ptr != NULL) { + pa = MmGetPhysicalAddress( ptr ); + *handle = pa.QuadPart; + } + return ptr; +} + + +/** +* dma_pool_free - put block back into dma pool +* @pool: the dma pool holding the block +* @vaddr: virtual address of block +* @dma: dma address of block +* +* Caller promises neither device nor driver will again touch this block +* unless it is first re-allocated. +*/ +static inline void +pci_pool_free (pci_pool_t *pool, void *vaddr, dma_addr_t dma) +{ + UNREFERENCED_PARAMETER(dma); + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + ExFreeToNPagedLookasideList( &pool->pool_hdr, vaddr ); +} + + + +/** + * pci_pool_destroy - destroys a pool of dma memory blocks. + * @pool: dma pool that will be destroyed + * Context: !in_interrupt() + * + * Caller guarantees that no more memory from the pool is in use, + * and that nothing will try to use the pool after this call. 
+ */ +static inline void +pci_pool_destroy (pci_pool_t *pool) +{ + ExDeleteNPagedLookasideList( &pool->pool_hdr ); + ExFreePool( pool); +} + + + +#endif diff --git a/branches/MTHCA/inc/kernel/mthca/mt_spinlock.h b/branches/MTHCA/inc/kernel/mthca/mt_spinlock.h new file mode 100644 index 00000000..9b07e887 --- /dev/null +++ b/branches/MTHCA/inc/kernel/mthca/mt_spinlock.h @@ -0,0 +1,124 @@ +#ifndef MT_SPINLOCK_H +#define MT_SPINLOCK_H + +typedef struct spinlock { + KSPIN_LOCK lock; + KLOCK_QUEUE_HANDLE lockh; +#ifdef SUPPORT_SPINLOCK_IRQ + PKINTERRUPT p_int_obj; + KIRQL irql; +#endif +} spinlock_t; + +#ifdef SUPPORT_SPINLOCK_IRQ + +static inline void +spin_lock_setint( + IN spinlock_t* const l, + IN PKINTERRUPT p_int_obj ) +{ + MT_ASSERT( l ); + l->p_int_obj = p_int_obj; +} + +static inline void spin_lock_irq_init( + IN spinlock_t* const l, + IN PKINTERRUPT int_obj + ) +{ + KeInitializeSpinLock( &l->lock ); + l->p_int_obj = int_obj; +} + +static inline unsigned long +spin_lock_irq( + IN spinlock_t* const l) +{ + MT_ASSERT( l ); + MT_ASSERT( l->p_int_obj ); + return (unsigned long)(l->irql = KeAcquireInterruptSpinLock ( l->p_int_obj )); +} + +static inline void +spin_unlock_irq( + IN spinlock_t* const p_spinlock ) +{ + MT_ASSERT( p_spinlock ); + MT_ASSERT( p_spinlock->p_int_obj ); + KeReleaseInterruptSpinLock ( p_spinlock->p_int_obj, p_spinlock->irql ); +} + +#endif + +static inline void spin_lock_init( + IN spinlock_t* const p_spinlock ) +{ + KeInitializeSpinLock( &p_spinlock->lock ); +} + +static inline void +spin_lock( + IN spinlock_t* const l ) +{ + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeAcquireInStackQueuedSpinLock ( &l->lock, &l->lockh ); +} + +static inline void +spin_unlock( + IN spinlock_t* const l ) +{ + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + KeReleaseInStackQueuedSpinLock( &l->lockh ); +} + +static inline void +spin_lock_sync( + IN spinlock_t* const l ) +{ + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeAcquireInStackQueuedSpinLock ( &l->lock, &l->lockh ); + KeReleaseInStackQueuedSpinLock( &l->lockh ); +} + +/* to be used only at DPC level */ +static inline void +spin_lock_dpc( + IN spinlock_t* const l ) +{ + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &l->lockh ); +} + +/* to be used only at DPC level */ +static inline void +spin_unlock_dpc( + IN spinlock_t* const l ) +{ + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &l->lockh ); +} + + +/* we are working from DPC level, so we can use usual spinlocks */ +#define spin_lock_irq spin_lock +#define spin_unlock_irq spin_unlock + +/* no diff in Windows */ +#define spin_lock_irqsave spin_lock_irq +#define spin_unlock_irqrestore spin_unlock_irq + +/* Windows doesn't support such kind of spinlocks so far, but may be tomorrow ... 
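(Illustrative aside, not original text: callers use the mapped names exactly as on Linux, except that this port's spin_lock_irqsave() takes no flags argument; the lock and the shared counter are assumptions.)

	spinlock_t lock;
	int shared;
	spin_lock_init( &lock );	// once, at init time
	// ...
	spin_lock_irqsave( &lock );	// raises to DISPATCH_LEVEL
	shared++;
	spin_unlock_irqrestore( &lock );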
diff --git a/branches/MTHCA/inc/kernel/mthca/mt_sync.h b/branches/MTHCA/inc/kernel/mthca/mt_sync.h
new file mode 100644
index 00000000..34e52633
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/mt_sync.h
@@ -0,0 +1,105 @@
+#ifndef MT_SYNC_H
+#define MT_SYNC_H
+
+// literals
+#ifndef LONG_MAX
+#define LONG_MAX	2147483647L	/* maximum (signed) long value */
+#endif
+
+
+// mutex wrapper
+
+// suitable both for mutexes and semaphores
+static inline void down(PRKMUTEX p_mutex)
+{
+	NTSTATUS status;
+
+	ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+	while (1) {
+		status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, FALSE, NULL );
+		if (status == STATUS_SUCCESS)
+			break;
+	}
+}
+
+// suitable both for mutexes and semaphores
+static inline int down_interruptible(PRKMUTEX p_mutex)
+{
+	NTSTATUS status;
+
+	ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+	status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, TRUE, NULL );
+	if (status == STATUS_SUCCESS)
+		return 0;
+	return -EINTR;
+}
+
+#define sem_down(ptr)			down((PRKMUTEX)(ptr))
+#define sem_down_interruptible(ptr)	down_interruptible((PRKMUTEX)(ptr))
+
+static inline void up(PRKMUTEX p_mutex)
+{
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+	KeReleaseMutex( p_mutex, FALSE );
+}
+
+static inline void sem_up(PRKSEMAPHORE p_sem)
+{
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+	KeReleaseSemaphore( p_sem, 0, 1, FALSE );
+}
+
+static inline void sem_init(
+	IN PRKSEMAPHORE	p_sem,
+	IN LONG		cnt,
+	IN LONG		limit)
+{
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+	KeInitializeSemaphore( p_sem, cnt, limit );
+}
+
+
+typedef struct wait_queue_head {
+	KEVENT		event;
+} wait_queue_head_t;
+
+/* NB: unlike the Linux wait_event() macro, the condition is evaluated
+ * only once before blocking; the caller must re-check it after wakeup */
+static inline void wait_event(wait_queue_head_t *obj_p, int condition)
+{
+	NTSTATUS status;
+	MT_ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+	if (condition)
+		return;
+	while (1) {
+		status = KeWaitForSingleObject( &obj_p->event, Executive, KernelMode, FALSE, NULL );
+		if (status == STATUS_SUCCESS)
+			break;
+	}
+}
+
+static inline void wake_up(wait_queue_head_t *obj_p)
+{
+	MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+	KeSetEvent( &obj_p->event, 0, FALSE );
+}
+
+static inline void init_waitqueue_head(wait_queue_head_t *obj_p)
+{
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+	KeInitializeEvent( &obj_p->event, NotificationEvent, FALSE );
+}
+
+static inline void free_irq(PKINTERRUPT int_obj)
+{
+	IoDisconnectInterrupt( int_obj );
+}
+
+int request_irq(
+	IN	CM_PARTIAL_RESOURCE_DESCRIPTOR	*int_info,	/* interrupt resources */
+	IN	KSPIN_LOCK	*isr_lock,	/* spin lock for ISR */
+	IN	PKSERVICE_ROUTINE	isr,	/* ISR */
+	IN	void	*isr_ctx,		/* ISR context */
+	OUT	PKINTERRUPT	*int_obj	/* interrupt object */
+	);
+
+
+#endif
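A sketch of the command-completion pattern these wrappers support; all names are hypothetical:

	static wait_queue_head_t cmd_wait;
	static volatile int cmd_done;

	/* completion side, e.g. from a DPC: */
	static void example_complete(void)
	{
		cmd_done = 1;
		wake_up( &cmd_wait );
	}

	/* waiting side, at IRQL < DISPATCH_LEVEL: */
	static void example_wait(void)
	{
		init_waitqueue_head( &cmd_wait );	/* once, at PASSIVE_LEVEL */
		cmd_done = 0;
		/* ... start the command ... */
		do {
			wait_event( &cmd_wait, cmd_done );
		} while (!cmd_done);		/* re-check, per the note above */
	}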
diff --git a/branches/MTHCA/inc/kernel/mthca/mt_time.h b/branches/MTHCA/inc/kernel/mthca/mt_time.h
new file mode 100644
index 00000000..3738ecf0
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/mt_time.h
@@ -0,0 +1,54 @@
+#ifndef MT_TIME_H
+#define MT_TIME_H
+
+
+/* get time stamp */
+static inline volatile u64 MT_time_get_stamp(void)
+{
+	volatile u64 tsc;
+
+#if defined(_WIN64) && (defined(IA64) || defined(_IA64_))
+	/* Itanium */
+
+	/* returns a value in units of 100 nsecs */
+	tsc = KeQueryInterruptTime();
+
+#elif defined(_WIN64) && (defined(AMD64) || defined(_AMD64_))
+	/* x64 */
+
+	/* returns a value in units of the Time-Stamp Counter (usually, clocks) */
+	tsc = __rdtsc();
+
+#elif defined(_WIN32) && (defined(i386) || defined(_x86_))
+	/* x86 */
+
+	/* returns a value in units of the Time-Stamp Counter (usually, clocks) */
+	__asm {
+		lea ebx,tsc
+		rdtsc
+		mov [ebx],eax
+		mov [ebx+4],edx
+	}
+#else
+	#error Unsupported platform
+#endif
+
+	return tsc;
+}
+
+extern u64 mt_ticks_per_sec;
+
+
+/* CONVERSIONS */
+#define MT_USECS_TO_TICKS(to)	((mt_ticks_per_sec * (to)) / 1000000 )
+#define MT_MSECS_TO_TICKS(to)	MT_USECS_TO_TICKS(1000 * (to))
+
+/* overflow-safe comparisons */
+#define time_after(a,b)		((INT64)(b) - (INT64)(a) < 0)
+#define time_before(a,b)	time_after(b,a)
+#define time_after_eq(a,b)	((INT64)(a) - (INT64)(b) >= 0)
+#define time_before_eq(a,b)	time_after_eq(b,a)
+
+
+#endif
+
diff --git a/branches/MTHCA/inc/kernel/mthca/mt_types.h b/branches/MTHCA/inc/kernel/mthca/mt_types.h
new file mode 100644
index 00000000..88a1474d
--- /dev/null
+++ b/branches/MTHCA/inc/kernel/mthca/mt_types.h
@@ -0,0 +1,63 @@
+#ifndef MT_TYPES_H
+#define MT_TYPES_H
+
+//#include
+ #include
+
+// ===========================================
+// SUBSTITUTES
+// ===========================================
+
+// gcc compiler attributes
+#define __bitwise
+#define __init
+#define __exit
+#define __devinit
+#define __devexit
+#define __nocast
+#define __user
+#define __iomem
+#define __force
+#define __devinitdata
+#define __attribute__(a)
+#define likely(x)		(x)
+#define unlikely(x)		(x)
+
+// container_of
+#define container_of		CONTAINING_RECORD
+
+// inline
+#define inline			__inline
+
+// ===========================================
+// TYPES
+// ===========================================
+
+// basic types
+typedef unsigned char		u8;
+typedef unsigned short int	u16;
+typedef unsigned int		u32;
+typedef unsigned __int64	u64;
+
+// inherited
+typedef u16	__le16;
+typedef u16	__be16;
+typedef u32	__le32;
+typedef u32	__be32;
+typedef u64	__le64;
+typedef u64	__be64;
+typedef u64	dma_addr_t;
+typedef u64	io_addr_t;
+
+// ===========================================
+// MACROS
+// ===========================================
+
+// assert
+#ifdef _DEBUG_
+#define MT_ASSERT( exp )	(void)(!(exp)?DbgPrint("Assertion Failed:" #exp "\n"),DbgBreakPoint(),FALSE:TRUE)
+#else
+#define MT_ASSERT( exp )
+#endif	/* _DEBUG_ */
+
+#endif
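A sketch of how the timestamp helpers compose into a bounded polling loop; the 10-ms budget, the 'done' flag, and the return codes are illustrative only:

	static int example_poll_done( volatile u32 *done )
	{
		u64 deadline = MT_time_get_stamp() + MT_MSECS_TO_TICKS(10);

		while (!*done) {
			if (time_after( MT_time_get_stamp(), deadline ))
				return -EAGAIN;		/* timed out */
		}
		return 0;
	}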