+++ /dev/null
-TARGETNAME=ibal\r
-TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)\r
-TARGETTYPE=DRIVER\r
-\r
-!if $(FREEBUILD)\r
-# TBD - will not build with EVENT_TRACING ? 5-3-08\r
-#ENABLE_EVENT_TRACING=1\r
-!else\r
-#ENABLE_EVENT_TRACING=1\r
-!endif\r
-\r
-SOURCES= ibal.rc \\r
- al_driver.c \\r
- al_ca_pnp.c \\r
- al_ci_ca.c \\r
- al_cm_cep.c \\r
- al_dev.c \\r
- al_ioc_pnp.c \\r
- al_mad_pool.c \\r
- al_fmr_pool.c \\r
- al_mgr.c \\r
- al_mr.c \\r
- al_pnp.c \\r
- al_proxy.c \\r
- al_proxy_cep.c \\r
- al_proxy_ioc.c \\r
- al_proxy_subnet.c \\r
- al_proxy_verbs.c \\r
- al_proxy_ndi.c \\r
- al_ndi_cq.c \\r
- al_ndi_cm.c \\r
- al_sa_req.c \\r
- al_smi.c \\r
- ..\al.c \\r
- ..\al_av.c \\r
- ..\al_ca.c \\r
- ..\al_ci_ca_shared.c \\r
- ..\al_cm_qp.c \\r
- ..\al_common.c \\r
- ..\al_cq.c \\r
- ..\al_dm.c \\r
- ..\al_init.c \\r
- ..\al_mad.c \\r
- ..\al_mcast.c \\r
- ..\al_mgr_shared.c \\r
- ..\al_mr_shared.c \\r
- ..\al_mw.c \\r
- ..\al_pd.c \\r
- ..\al_qp.c \\r
- ..\al_query.c \\r
- ..\al_reg_svc.c \\r
- ..\al_res_mgr.c \\r
- ..\al_srq.c \\r
- ..\al_sub.c \\r
- ..\ib_common.c \\r
- ..\ib_statustext.c\r
-\r
-INCLUDES=..;..\..\..\inc;..\..\..\inc\kernel;\r
-\r
-C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS \\r
- -DEXPORT_AL_SYMBOLS\r
-\r
-TARGETLIBS= \\r
- $(TARGETPATH)\*\complib.lib\r
- \r
-!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K"\r
-#\r
-# The driver is built in the Win2K build environment\r
-# - use the library version of safe strings \r
-#\r
-TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib\r
-!endif\r
-\r
-\r
-!IFDEF ENABLE_EVENT_TRACING\r
-\r
-C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
-\r
-RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \\r
- -scan:..\al_debug.h \\r
- -func:AL_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
- -func:AL_PRINT_EXIT(LEVEL,FLAGS,(MSG,...))\r
-!ENDIF\r
-\r
-MSC_WARNING_LEVEL= /W4\r
+++ /dev/null
-\r
-Look at WinVerbs query HCA code, then examine IPoIB QUERY AL interface and deduce a\r
-such that IBAL can QUERY HCAs for their CI (Channel interface).
\ No newline at end of file
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_ca_pnp.c 9 2005-05-23 22:38:08Z ftillier $\r
- */\r
-\r
-#include <iba/ib_al.h>\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_ca_pnp.h 9 2005-05-23 22:38:08Z ftillier $\r
- */\r
-\r
-#if !defined(__IB_AL_CA_PNP_H__)\r
-#define __IB_AL_CA_PNP_H__\r
-\r
-#endif /* __IB_AL_CA_PNP_H__ */\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_ci_ca.c 1088 2008-04-30 03:43:49Z shefty $\r
- */\r
-\r
-#include "al_ci_ca.h"\r
-#include "al_verbs.h"\r
-#include "al_cq.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_ci_ca.tmh"\r
-#endif\r
-#include "al_mad_pool.h"\r
-#include "al_mgr.h"\r
-#include "al_mr.h"\r
-#include "al_pnp.h"\r
-#include "al_mad_pool.h"\r
-\r
-#include "ib_common.h"\r
-\r
-\r
-#define EVENT_POOL_MIN 4\r
-#define EVENT_POOL_MAX 0\r
-#define EVENT_POOL_GROW 1\r
-\r
-\r
-void\r
-destroying_ci_ca(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-cleanup_ci_ca(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-free_ci_ca(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-ci_ca_comp_cb(\r
- IN void *cq_context );\r
-\r
-void\r
-ci_ca_async_proc_cb(\r
- IN struct _cl_async_proc_item *p_item );\r
-\r
-void\r
-ci_ca_async_event_cb(\r
- IN ib_event_rec_t* p_event_record );\r
-\r
-\r
-\r
-ib_api_status_t\r
-create_ci_ca(\r
- IN al_obj_t *p_parent_obj,\r
- IN const ci_interface_t* p_ci )\r
-{\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- CL_ASSERT( p_ci );\r
-\r
- /* Allocate the CI CA. */\r
- p_ci_ca = (al_ci_ca_t*)cl_zalloc( sizeof( al_ci_ca_t ) );\r
- if( !p_ci_ca )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_zalloc failed\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Construct the CI CA. */\r
- construct_al_obj( &p_ci_ca->obj, AL_OBJ_TYPE_CI_CA );\r
- cl_spinlock_construct( &p_ci_ca->attr_lock );\r
- cl_qlist_init( &p_ci_ca->ca_list );\r
- cl_qlist_init( &p_ci_ca->shmid_list );\r
- cl_qpool_construct( &p_ci_ca->event_pool );\r
- p_ci_ca->verbs = *p_ci;\r
-\r
- cl_status = cl_spinlock_init( &p_ci_ca->attr_lock );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- free_ci_ca( &p_ci_ca->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_spinlock_init failed, status = 0x%x.\n",\r
- ib_convert_cl_status( cl_status ) ) );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
- /* Create a pool of items to report asynchronous events. */\r
- cl_status = cl_qpool_init( &p_ci_ca->event_pool, EVENT_POOL_MIN,\r
- EVENT_POOL_MAX, EVENT_POOL_GROW, sizeof( event_item_t ), NULL,\r
- NULL, p_ci_ca );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- free_ci_ca( &p_ci_ca->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_qpool_init failed, status = 0x%x.\n", \r
- ib_convert_cl_status( cl_status ) ) );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
- status = init_al_obj( &p_ci_ca->obj, p_ci_ca, FALSE,\r
- destroying_ci_ca, cleanup_ci_ca, free_ci_ca );\r
- if( status != IB_SUCCESS )\r
- {\r
- free_ci_ca( &p_ci_ca->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
- status = attach_al_obj( p_parent_obj, &p_ci_ca->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb;\r
-\r
- /* Open the CI CA. */\r
- status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid,\r
- ci_ca_async_event_cb, p_ci_ca, &p_ci_ca->h_ci_ca );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("open_ca failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Increase the max timeout for the CI CA to handle driver unload. */\r
- set_al_obj_timeout( &p_ci_ca->obj, AL_MAX_TIMEOUT_MS );\r
-\r
- /*\r
- * Register ourselves with the AL manager, so that the open call below\r
- * will succeed.\r
- */\r
- add_ci_ca( p_ci_ca );\r
-\r
- /* Open the AL CA. */\r
- status = ib_open_ca( gh_al, p_ci_ca->verbs.guid, ca_event_cb, p_ci_ca,\r
- &p_ci_ca->h_ca );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_open_ca failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Get a list of the port GUIDs on this CI CA. */\r
- status = get_port_info( p_ci_ca );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("get_port_guids failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Allocate a PD for use by AL itself. */\r
- status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_SQP, p_ci_ca,\r
- &p_ci_ca->h_pd );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_alloc_pd failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Allocate a PD for use by AL itself. */\r
- status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca,\r
- &p_ci_ca->h_pd_alias );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_alloc_pd alias failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Register the global MAD pool on this CA. */\r
- status = ib_reg_mad_pool( gh_mad_pool, p_ci_ca->h_pd, &p_ci_ca->pool_key );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_reg_mad_pool failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /*\r
- * Notify the PnP manager that a CA has been added.\r
- * NOTE: PnP Manager must increment the CA reference count.\r
- */\r
- status = pnp_ca_event( p_ci_ca, IB_PNP_CA_ADD );\r
- if( status != IB_SUCCESS )\r
- {\r
- /* Destroy the CA */\r
- p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("al_pnp_add_ca failed, status = 0x%x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_ci_ca->obj );\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-void\r
-destroying_ci_ca(\r
- IN al_obj_t* p_obj )\r
-{\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- CL_ASSERT( p_obj );\r
- p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj );\r
-\r
- /*\r
- * Notify the PnP manager that this CA is being removed.\r
- * NOTE: PnP Manager must decrement the CA reference count.\r
- */\r
- pnp_ca_event( p_ci_ca, IB_PNP_CA_REMOVE );\r
-\r
- /*\r
- * We queue a request to the asynchronous processing manager to close\r
- * the CA after the PNP remove CA event has been delivered. This avoids\r
- * the ib_close_ca() call from immediately removing resouces (PDs, QPs)\r
- * that are in use by clients waiting on the remove CA event.\r
- */\r
- if( p_ci_ca->h_ca )\r
- cl_async_proc_queue( gp_async_pnp_mgr, &p_ci_ca->dereg_async_item );\r
-}\r
-\r
-\r
-\r
-void\r
-ci_ca_async_proc_cb(\r
- IN struct _cl_async_proc_item *p_item )\r
-{\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- p_ci_ca = PARENT_STRUCT( p_item, al_ci_ca_t, dereg_async_item );\r
-\r
- /* Release all AL resources acquired by the CI CA. */\r
- ib_close_ca( p_ci_ca->h_ca, NULL );\r
-}\r
-\r
-\r
-\r
-void\r
-cleanup_ci_ca(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_api_status_t status;\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- CL_ASSERT( p_obj );\r
- p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj );\r
-\r
- CL_ASSERT( cl_is_qlist_empty( &p_ci_ca->shmid_list ) );\r
-\r
- if( p_ci_ca->h_ci_ca )\r
- {\r
- remove_ci_ca( p_ci_ca );\r
- status = p_ci_ca->verbs.close_ca( p_ci_ca->h_ci_ca );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CA );\r
-}\r
-\r
-\r
-\r
-void\r
-ci_ca_comp_cb(\r
- IN void *cq_context )\r
-{\r
- ib_cq_handle_t h_cq = (ib_cq_handle_t)cq_context;\r
-\r
- if( h_cq->h_wait_obj )\r
- KeSetEvent( h_cq->h_wait_obj, IO_NETWORK_INCREMENT, FALSE );\r
- else\r
- h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context );\r
-}\r
-\r
-\r
-\r
-/*\r
- * CI CA asynchronous event callback.\r
- */\r
-void\r
-ci_ca_async_event_cb(\r
- IN ib_event_rec_t* p_event_record )\r
-{\r
- ib_async_event_rec_t event_rec;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- CL_ASSERT( p_event_record );\r
-\r
- event_rec.code = p_event_record->type;\r
- event_rec.context = p_event_record->context;\r
- event_rec.vendor_specific = p_event_record->vendor_specific;\r
-\r
- ci_ca_async_event( &event_rec );\r
-\r
- AL_EXIT( AL_DBG_CA );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Insert a new shmid tracking structure into the CI CA's list.\r
- */\r
-void\r
-add_shmid(\r
- IN al_ci_ca_t* const p_ci_ca,\r
- IN struct _al_shmid *p_shmid )\r
-{\r
- CL_ASSERT( p_ci_ca && p_shmid );\r
-\r
- p_shmid->obj.p_ci_ca = p_ci_ca;\r
-\r
- /* Insert the shmid structure into the shmid list. */\r
- cl_spinlock_acquire( &p_ci_ca->obj.lock );\r
- cl_qlist_insert_head( &p_ci_ca->shmid_list, &p_shmid->list_item );\r
- cl_spinlock_release( &p_ci_ca->obj.lock );\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-acquire_shmid(\r
- IN al_ci_ca_t* const p_ci_ca,\r
- IN int shmid,\r
- OUT struct _al_shmid **pp_shmid )\r
-{\r
- al_shmid_t *p_shmid;\r
- cl_list_item_t *p_list_item;\r
-\r
- /* Try to find the shmid. */\r
- cl_spinlock_acquire( &p_ci_ca->obj.lock );\r
- for( p_list_item = cl_qlist_head( &p_ci_ca->shmid_list );\r
- p_list_item != cl_qlist_end( &p_ci_ca->shmid_list );\r
- p_list_item = cl_qlist_next( p_list_item ) )\r
- {\r
- p_shmid = PARENT_STRUCT( p_list_item, al_shmid_t, list_item );\r
- if( p_shmid->id == shmid )\r
- {\r
- ref_al_obj( &p_shmid->obj );\r
- *pp_shmid = p_shmid;\r
- break;\r
- }\r
- }\r
- cl_spinlock_release( &p_ci_ca->obj.lock );\r
-\r
- if( p_list_item == cl_qlist_end( &p_ci_ca->shmid_list ) )\r
- return IB_NOT_FOUND;\r
- else\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-void\r
-release_shmid(\r
- IN struct _al_shmid *p_shmid )\r
-{\r
- al_ci_ca_t *p_ci_ca;\r
- int32_t ref_cnt;\r
-\r
- CL_ASSERT( p_shmid );\r
-\r
- p_ci_ca = p_shmid->obj.p_ci_ca;\r
-\r
- cl_spinlock_acquire( &p_ci_ca->obj.lock );\r
-\r
- /* Dereference the shmid. */\r
- ref_cnt = deref_al_obj( &p_shmid->obj );\r
-\r
- /* If the shmid is no longer in active use, remove it. */\r
- if( ref_cnt == 1 )\r
- cl_qlist_remove_item( &p_ci_ca->shmid_list, &p_shmid->list_item );\r
-\r
- cl_spinlock_release( &p_ci_ca->obj.lock );\r
-\r
- /* Destroy the shmid if it is not needed. */\r
- if( ref_cnt == 1 )\r
- {\r
- ref_al_obj( &p_shmid->obj );\r
- p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL );\r
- }\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ib_ci_call(\r
- IN ib_ca_handle_t h_ca,\r
- IN const void* __ptr64 * const handle_array OPTIONAL,\r
- IN uint32_t num_handles,\r
- IN ib_ci_op_t* const p_ci_op )\r
-{\r
- return ci_call( h_ca, handle_array, num_handles, p_ci_op, NULL );\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ci_call(\r
- IN ib_ca_handle_t h_ca,\r
- IN const void* __ptr64 * const handle_array OPTIONAL,\r
- IN uint32_t num_handles,\r
- IN ib_ci_op_t* const p_ci_op,\r
- IN ci_umv_buf_t* const p_umv_buf OPTIONAL )\r
-{\r
- void* __ptr64 * p_handle_array;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );\r
- return IB_INVALID_CA_HANDLE;\r
- }\r
- if( !p_ci_op )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
- p_handle_array = NULL;\r
- if ( num_handles )\r
- {\r
- p_handle_array = cl_zalloc( sizeof(void* __ptr64) * num_handles );\r
- if( !p_handle_array )\r
- return IB_INSUFFICIENT_MEMORY;\r
-\r
- status = al_convert_to_ci_handles( p_handle_array, handle_array,\r
- num_handles );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_free( p_handle_array );\r
- return status;\r
- }\r
- }\r
-\r
- if( h_ca->obj.p_ci_ca->verbs.vendor_call )\r
- {\r
- status = verbs_ci_call(\r
- h_ca, p_handle_array, num_handles, p_ci_op, p_umv_buf );\r
- }\r
- else\r
- {\r
- status = IB_UNSUPPORTED;\r
- }\r
-\r
- if ( num_handles )\r
- cl_free( p_handle_array );\r
-\r
- AL_EXIT( AL_DBG_QUERY );\r
- return status;\r
-}\r
-\r
-\r
-DEVICE_OBJECT*\r
-get_ca_dev(\r
- IN const ib_ca_handle_t h_ca )\r
-{\r
- ASSERT( h_ca );\r
-\r
- ObReferenceObject( h_ca->obj.p_ci_ca->verbs.p_hca_dev );\r
- return h_ca->obj.p_ci_ca->verbs.p_hca_dev;\r
-}
\ No newline at end of file
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_cm_cep.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-\r
-#include <iba/ib_al.h>\r
-#include <complib/cl_vector.h>\r
-#include <complib/cl_rbmap.h>\r
-#include <complib/cl_qmap.h>\r
-#include <complib/cl_spinlock.h>\r
-#include "al_common.h"\r
-#include "al_cm_cep.h"\r
-#include "al_cm_conn.h"\r
-#include "al_cm_sidr.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_cm_cep.tmh"\r
-#endif\r
-#include "ib_common.h"\r
-#include "al_mgr.h"\r
-#include "al_ca.h"\r
-#include "al.h"\r
-#include "al_mad.h"\r
-#include "al_qp.h"\r
-\r
-\r
-/*\r
- * The vector object uses a list item at the front of the buffers\r
- * it allocates. Take the list item into account so that allocations\r
- * are for full page sizes.\r
- */\r
-#define CEP_CID_MIN \\r
- ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
-#define CEP_CID_GROW \\r
- ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
-\r
-/*\r
- * We reserve the upper byte of the connection ID as a revolving counter so\r
- * that connections that are retried by the client change connection ID.\r
- * This counter is never zero, so it is OK to use all CIDs since we will never\r
- * have a full CID (base + counter) that is zero.\r
- * See the IB spec, section 12.9.8.7 for details about REJ retry.\r
- */\r
-#define CEP_MAX_CID (0x00FFFFFF)\r
-#define CEP_MAX_CID_MASK (0x00FFFFFF)\r
-\r
-#define CEP_MAD_SQ_DEPTH (128)\r
-#define CEP_MAD_RQ_DEPTH (1) /* ignored. */\r
-#define CEP_MAD_SQ_SGE (1)\r
-#define CEP_MAD_RQ_SGE (1) /* ignored. */\r
-\r
-\r
-/* Global connection manager object. */\r
-typedef struct _al_cep_mgr\r
-{\r
- al_obj_t obj;\r
-\r
- cl_qmap_t port_map;\r
-\r
- KSPIN_LOCK lock;\r
-\r
- /* Bitmap of CEPs, indexed by CID. */\r
- cl_vector_t cid_vector;\r
- uint32_t free_cid;\r
-\r
- /* List of active listens. */\r
- cl_rbmap_t listen_map;\r
-\r
- /* Map of CEP by remote CID and CA GUID. */\r
- cl_rbmap_t conn_id_map;\r
- /* Map of CEP by remote QPN, used for stale connection matching. */\r
- cl_rbmap_t conn_qp_map;\r
-\r
- NPAGED_LOOKASIDE_LIST cep_pool;\r
- NPAGED_LOOKASIDE_LIST req_pool;\r
-\r
- /*\r
- * Periodically walk the list of connections in the time wait state\r
- * and flush them as appropriate.\r
- */\r
- cl_timer_t timewait_timer;\r
- cl_qlist_t timewait_list;\r
-\r
- ib_pnp_handle_t h_pnp;\r
-\r
-} al_cep_mgr_t;\r
-\r
-\r
-/* Per-port CM object. */\r
-typedef struct _cep_port_agent\r
-{\r
- al_obj_t obj;\r
-\r
- cl_map_item_t item;\r
-\r
- ib_ca_handle_t h_ca;\r
- ib_pd_handle_t h_pd;\r
- ib_qp_handle_t h_qp;\r
- ib_pool_key_t pool_key;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- net64_t port_guid;\r
- uint8_t port_num;\r
- net16_t base_lid;\r
-\r
-} cep_agent_t;\r
-\r
-\r
-/*\r
- * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively.\r
- * This allows shifting 1 << msg_mraed from an MRA to figure out for what\r
- * message the MRA was sent for.\r
- */\r
-#define CEP_STATE_RCVD 0x10000000\r
-#define CEP_STATE_SENT 0x20000000\r
-#define CEP_STATE_MRA 0x01000000\r
-#define CEP_STATE_REQ 0x00000001\r
-#define CEP_STATE_REP 0x00000002\r
-#define CEP_STATE_LAP 0x00000004\r
-#define CEP_STATE_RTU 0x00000008\r
-#define CEP_STATE_DREQ 0x00000010\r
-#define CEP_STATE_DREP 0x00000020\r
-#define CEP_STATE_DESTROYING 0x00010000\r
-#define CEP_STATE_USER 0x00020000\r
-\r
-#define CEP_MSG_MASK 0x000000FF\r
-#define CEP_OP_MASK 0xF0000000\r
-\r
-#define CEP_STATE_PREP 0x00100000\r
-\r
-/* States match CM state transition diagrams from spec. */\r
-typedef enum _cep_state\r
-{\r
- CEP_STATE_IDLE,\r
- CEP_STATE_LISTEN,\r
- CEP_STATE_ESTABLISHED,\r
- CEP_STATE_TIMEWAIT,\r
- CEP_STATE_SREQ_SENT,\r
- CEP_STATE_SREQ_RCVD,\r
- CEP_STATE_ERROR,\r
- CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
- CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
- CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD,\r
- CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP,\r
- CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT,\r
- CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA,\r
- CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA,\r
- CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP,\r
- CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD,\r
- CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT,\r
- CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA,\r
- CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA,\r
- CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD,\r
- CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP,\r
- CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT,\r
- CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA,\r
- CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA,\r
- CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP,\r
- CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT,\r
- CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD,\r
- CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING\r
-\r
-} cep_state_t;\r
-\r
-\r
-/* Active side CEP state transitions:\r
-* al_create_cep -> IDLE\r
-* al_cep_pre_req -> PRE_REQ\r
-* al_cep_send_req -> REQ_SENT\r
-* Recv REQ MRA -> REQ_MRA_RCVD\r
-* Recv REP -> REP_RCVD\r
-* al_cep_mra -> REP_MRA_SENT\r
-* al_cep_rtu -> ESTABLISHED\r
-*\r
-* Passive side CEP state transitions:\r
-* al_create_cep -> IDLE\r
-* Recv REQ -> REQ_RCVD\r
-* al_cep_mra* -> REQ_MRA_SENT\r
-* al_cep_pre_rep -> PRE_REP\r
-* al_cep_mra* -> PRE_REP_MRA_SENT\r
-* al_cep_send_rep -> REP_SENT\r
-* Recv RTU -> ESTABLISHED\r
-*\r
-* *al_cep_mra can only be called once - either before or after PRE_REP.\r
-*/\r
-\r
-typedef struct _al_kcep_av\r
-{\r
- ib_av_attr_t attr;\r
- net64_t port_guid;\r
- uint16_t pkey_index;\r
-\r
-} kcep_av_t;\r
-\r
-\r
-typedef struct _al_kcep\r
-{\r
- net32_t cid;\r
- void* context;\r
-\r
- struct _cep_cid *p_cid;\r
-\r
- net64_t sid;\r
-\r
- /* Port guid for filtering incoming requests. */\r
- net64_t port_guid;\r
-\r
- uint8_t* __ptr64 p_cmp_buf;\r
- uint8_t cmp_offset;\r
- uint8_t cmp_len;\r
-\r
- boolean_t p2p;\r
-\r
- /* Used to store connection structure with owning AL instance. */\r
- cl_list_item_t al_item;\r
-\r
- /* Flag to indicate whether a user is processing events. */\r
- boolean_t signalled;\r
-\r
- /* Destroy callback. */\r
- ib_pfn_destroy_cb_t pfn_destroy_cb;\r
-\r
- ib_mad_element_t *p_mad_head;\r
- ib_mad_element_t *p_mad_tail;\r
- al_pfn_cep_cb_t pfn_cb;\r
-\r
- IRP *p_irp;\r
-\r
- /* MAP item for finding listen CEPs. */\r
- cl_rbmap_item_t listen_item;\r
-\r
- /* Map item for finding CEPs based on remote comm ID & CA GUID. */\r
- cl_rbmap_item_t rem_id_item;\r
-\r
- /* Map item for finding CEPs based on remote QP number. */\r
- cl_rbmap_item_t rem_qp_item;\r
-\r
- /* Communication ID's for the connection. */\r
- net32_t local_comm_id;\r
- net32_t remote_comm_id;\r
-\r
- net64_t local_ca_guid;\r
- net64_t remote_ca_guid;\r
-\r
- /* Remote QP, used for stale connection checking. */\r
- net32_t remote_qpn;\r
-\r
- /* Parameters to format QP modification structure. */\r
- net32_t sq_psn;\r
- net32_t rq_psn;\r
- uint8_t resp_res;\r
- uint8_t init_depth;\r
- uint8_t rnr_nak_timeout;\r
-\r
- /*\r
- * Local QP number, used for the "additional check" required\r
- * of the DREQ.\r
- */\r
- net32_t local_qpn;\r
-\r
- /* PKEY to make sure a LAP is on the same partition. */\r
- net16_t pkey;\r
-\r
- /* Initiator depth as received in the REQ. */\r
- uint8_t req_init_depth;\r
-\r
- /*\r
- * Primary and alternate path info, used to create the address vectors for\r
- * sending MADs, to locate the port CM agent to use for outgoing sends,\r
- * and for creating the address vectors for transitioning QPs.\r
- */\r
- kcep_av_t av[2];\r
- uint8_t idx_primary;\r
-\r
- /* Temporary AV and CEP port GUID used when processing LAP. */\r
- kcep_av_t alt_av;\r
- uint8_t alt_2pkt_life;\r
-\r
- /* maxium packet lifetime * 2 of any path used on a connection. */\r
- uint8_t max_2pkt_life;\r
- /* Given by the REP, used for alternate path setup. */\r
- uint8_t target_ack_delay;\r
- /* Stored to help calculate the local ACK delay in the LAP. */\r
- uint8_t local_ack_delay;\r
-\r
- /* Volatile to allow using atomic operations for state checks. */\r
- cep_state_t state;\r
-\r
- /*\r
- * Flag that indicates whether a connection took the active role during\r
- * establishment. \r
- */\r
- boolean_t was_active;\r
-\r
- /*\r
- * Handle to the sent MAD, used for cancelling. We store the handle to\r
- * the mad service so that we can properly cancel. This should not be a\r
- * problem since all outstanding sends should be completed before the\r
- * mad service completes its destruction and the handle becomes invalid.\r
- */\r
- ib_mad_svc_handle_t h_mad_svc;\r
- ib_mad_element_t *p_send_mad;\r
-\r
- /* Number of outstanding MADs. Delays destruction of CEP destruction. */\r
- atomic32_t ref_cnt;\r
-\r
- /* MAD transaction ID to use when sending MADs. */\r
- uint64_t tid;\r
-\r
- /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */\r
- uint8_t max_cm_retries;\r
- /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. */\r
- uint32_t retry_timeout;\r
-\r
- /* Timer that will be signalled when the CEP exits timewait. */\r
- KTIMER timewait_timer;\r
- LARGE_INTEGER timewait_time;\r
- cl_list_item_t timewait_item;\r
-\r
- /*\r
- * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls\r
- * allocate and format the MAD, and the send_req, send_rep and send_apr\r
- * calls send it.\r
- */\r
- ib_mad_element_t *p_mad;\r
-\r
- /* Cache the last MAD sent for retransmission. */\r
- union _mads\r
- {\r
- ib_mad_t hdr;\r
- mad_cm_mra_t mra;\r
- mad_cm_rtu_t rtu;\r
- mad_cm_drep_t drep;\r
-\r
- } mads;\r
-\r
- /*\r
- * NDI stuff\r
- */\r
-\r
- /* IRP list head */\r
- LIST_ENTRY irp_que;\r
-\r
- /* private data of REQ, REP, REJ CM requests */ \r
- uint8_t psize;\r
- uint8_t pdata[IB_REP_PDATA_SIZE];\r
-\r
-} kcep_t;\r
-\r
-\r
-/* Structures stored in the CID vector. */\r
-typedef struct _cep_cid\r
-{\r
- /* Owning AL handle. NULL if invalid. */\r
- ib_al_handle_t h_al;\r
- /* Pointer to CEP, or index of next free entry if h_al is NULL. */\r
- kcep_t *p_cep;\r
- /* For REJ Retry support */\r
- uint8_t modifier;\r
-\r
-} cep_cid_t;\r
-\r
-\r
-/* Global instance of the CM agent. */\r
-al_cep_mgr_t *gp_cep_mgr = NULL;\r
-\r
-\r
-static ib_api_status_t\r
-__format_drep(\r
- IN kcep_t* const p_cep,\r
- IN const uint8_t* p_pdata OPTIONAL,\r
- IN uint8_t pdata_len,\r
- IN OUT mad_cm_drep_t* const p_drep );\r
-\r
-static ib_api_status_t\r
-__cep_queue_mad(\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* p_mad );\r
-\r
-static inline void\r
-__process_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static inline uint32_t\r
-__calc_mad_timeout(\r
- IN const uint8_t pkt_life );\r
-\r
-static inline void\r
-__calc_timewait(\r
- IN kcep_t* const p_cep );\r
-\r
-static kcep_t*\r
-__create_cep( void );\r
-\r
-static int32_t\r
-__cleanup_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static void\r
-__destroy_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static inline void\r
-__bind_cep(\r
- IN kcep_t* const p_cep,\r
- IN ib_al_handle_t h_al,\r
- IN al_pfn_cep_cb_t pfn_cb,\r
- IN void* __ptr64 context );\r
-\r
-static inline void\r
-__unbind_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static void\r
-__pre_destroy_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static kcep_t*\r
-__lookup_by_id(\r
- IN net32_t remote_comm_id,\r
- IN net64_t remote_ca_guid );\r
-\r
-static kcep_t*\r
-__lookup_listen(\r
- IN net64_t sid,\r
- IN net64_t port_guid,\r
- IN void *p_pdata );\r
-\r
-static inline kcep_t*\r
-__lookup_cep(\r
- IN ib_al_handle_t h_al OPTIONAL,\r
- IN net32_t cid );\r
-\r
-static inline kcep_t*\r
-__insert_cep(\r
- IN kcep_t* const p_new_cep );\r
-\r
-static inline void\r
-__remove_cep(\r
- IN kcep_t* const p_cep );\r
-\r
-static inline void\r
-__insert_timewait(\r
- IN kcep_t* const p_cep );\r
-\r
-static ib_api_status_t\r
-__cep_get_mad(\r
- IN kcep_t* const p_cep,\r
- IN net16_t attr_id,\r
- OUT cep_agent_t** const pp_port_cep,\r
- OUT ib_mad_element_t** const pp_mad );\r
-\r
-static ib_api_status_t\r
-__cep_send_mad(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad );\r
-\r
-/* Returns the 1-based port index of the CEP agent with the specified GID. */\r
-static cep_agent_t*\r
-__find_port_cep(\r
- IN const ib_gid_t* const p_gid,\r
- IN const net16_t lid,\r
- IN const net16_t pkey,\r
- OUT uint16_t* const p_pkey_index );\r
-\r
-static cep_cid_t*\r
-__get_lcid(\r
- OUT net32_t* const p_cid );\r
-\r
-static void\r
-__process_cep_send_comp(\r
- IN cl_async_proc_item_t *p_item );\r
-\r
-\r
-/******************************************************************************\r
-* Per-port CEP agent\r
-******************************************************************************/\r
-\r
-\r
-static inline void\r
-__format_mad_hdr(\r
- IN ib_mad_t* const p_mad,\r
- IN const kcep_t* const p_cep,\r
- IN net16_t attr_id )\r
-{\r
- p_mad->base_ver = 1;\r
- p_mad->mgmt_class = IB_MCLASS_COMM_MGMT;\r
- p_mad->class_ver = IB_MCLASS_CM_VER_2;\r
- p_mad->method = IB_MAD_METHOD_SEND;\r
- p_mad->status = 0;\r
- p_mad->class_spec = 0;\r
- p_mad->trans_id = p_cep->tid;\r
- p_mad->attr_id = attr_id;\r
- p_mad->resv = 0;\r
- p_mad->attr_mod = 0;\r
-}\r
-\r
-\r
-/* Consumes the input MAD. */\r
-static void\r
-__reject_mad(\r
- IN cep_agent_t* const p_port_cep,\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* const p_mad,\r
- IN ib_rej_status_t reason )\r
-{\r
- mad_cm_rej_t *p_rej;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
-\r
- __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID );\r
-\r
- p_rej->local_comm_id = p_cep->local_comm_id;\r
- p_rej->remote_comm_id = p_cep->remote_comm_id;\r
- p_rej->reason = reason;\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- conn_rej_set_msg_rejected( 0, p_rej );\r
- break;\r
-\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- conn_rej_set_msg_rejected( 1, p_rej );\r
- break;\r
-\r
- default:\r
- CL_ASSERT( reason == IB_REJ_TIMEOUT );\r
- conn_rej_set_msg_rejected( 2, p_rej );\r
- break;\r
- }\r
-\r
- conn_rej_clr_rsvd_fields( p_rej );\r
- __cep_send_mad( p_port_cep, p_mad );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__reject_timeout(\r
- IN cep_agent_t* const p_port_cep,\r
- IN kcep_t* const p_cep,\r
- IN const ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- ib_mad_element_t *p_rej_mad;\r
- ib_mad_t *p_mad_buf;\r
- ib_grh_t *p_grh;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_mad returned %s\n", ib_get_err_str( status )) );\r
- return;\r
- }\r
-\r
- /* Save the buffer pointers from the new element. */\r
- p_mad_buf = p_rej_mad->p_mad_buf;\r
- p_grh = p_rej_mad->p_grh;\r
-\r
- /*\r
- * Copy the input MAD element to the reject - this gives us\r
- * all appropriate addressing information.\r
- */\r
- cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) );\r
- cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) );\r
-\r
- /* Restore the buffer pointers now that the copy is complete. */\r
- p_rej_mad->p_mad_buf = p_mad_buf;\r
- p_rej_mad->p_grh = p_grh;\r
-\r
- status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- /* Copy the local CA GUID into the ARI. */\r
- switch( p_mad->p_mad_buf->attr_id )\r
- {\r
- case CM_REQ_ATTR_ID:\r
- status = conn_rej_set_ari(\r
- (uint8_t*)&p_cep->local_ca_guid,\r
- sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
- break;\r
-\r
- case CM_REP_ATTR_ID:\r
- status = conn_rej_set_ari(\r
- (uint8_t*)&p_cep->local_ca_guid,\r
- sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
- break;\r
-\r
- default:\r
- CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||\r
- p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID );\r
- ib_put_mad( p_rej_mad );\r
- return;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__reject_req(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad,\r
- IN const ib_rej_status_t reason )\r
-{\r
- mad_cm_req_t *p_req;\r
- mad_cm_rej_t *p_rej;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_port_cep );\r
- CL_ASSERT( p_mad );\r
- CL_ASSERT( reason != 0 );\r
-\r
- p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
- p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
-\r
- /*\r
- * Format the reject information, overwriting the REQ data and send\r
- * the response.\r
- */\r
- p_rej->hdr.attr_id = CM_REJ_ATTR_ID;\r
- p_rej->remote_comm_id = p_req->local_comm_id;\r
- p_rej->local_comm_id = 0;\r
- conn_rej_set_msg_rejected( 0, p_rej );\r
- p_rej->reason = reason;\r
- conn_rej_set_ari( NULL, 0, p_rej );\r
- conn_rej_set_pdata( NULL, 0, p_rej );\r
- conn_rej_clr_rsvd_fields( p_rej );\r
-\r
- p_mad->retry_cnt = 0;\r
- p_mad->send_opt = 0;\r
- p_mad->timeout_ms = 0;\r
- p_mad->resp_expected = FALSE;\r
-\r
- __cep_send_mad( p_port_cep, p_mad );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__format_req_av(\r
- IN kcep_t* const p_cep,\r
- IN const mad_cm_req_t* const p_req,\r
- IN const uint8_t idx )\r
-{\r
- cep_agent_t *p_port_cep;\r
- const req_path_info_t *p_path;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_req );\r
-\r
- cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) );\r
-\r
- p_path = &((&p_req->primary_path)[idx]);\r
-\r
- p_port_cep = __find_port_cep( &p_path->remote_gid,\r
- p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index );\r
- if( !p_port_cep )\r
- {\r
- if( !idx )\r
- p_cep->local_ca_guid = 0;\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- if( !idx )\r
- p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
-\r
- /* Check that CA GUIDs match if formatting the alternate path. */\r
- if( idx &&\r
- p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- /*\r
- * Pkey indeces must match if formating the alternat path - the QP\r
- * modify structure only allows for a single PKEY index to be specified.\r
- */\r
- if( idx &&\r
- p_cep->av[0].pkey_index != p_cep->av[1].pkey_index )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- p_cep->av[idx].port_guid = p_port_cep->port_guid;\r
- p_cep->av[idx].attr.port_num = p_port_cep->port_num;\r
-\r
- p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path );\r
- p_cep->av[idx].attr.dlid = p_path->local_lid;\r
-\r
- if( !conn_req_path_get_subn_lcl( p_path ) )\r
- {\r
- p_cep->av[idx].attr.grh_valid = TRUE;\r
- p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
- 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) );\r
- p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit;\r
- p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid;\r
- p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid;\r
- }\r
- else\r
- {\r
- p_cep->av[idx].attr.grh_valid = FALSE;\r
- }\r
- p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path );\r
- p_cep->av[idx].attr.path_bits =\r
- (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);\r
-\r
- /*\r
- * Note that while we never use the connected AV attributes internally,\r
- * we store them so we can pass them back to users.\r
- */\r
- p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req );\r
- p_cep->av[idx].attr.conn.local_ack_timeout =\r
- conn_req_path_get_lcl_ack_timeout( p_path );\r
- p_cep->av[idx].attr.conn.seq_err_retry_cnt =\r
- conn_req_get_retry_cnt( p_req );\r
- p_cep->av[idx].attr.conn.rnr_retry_cnt =\r
- conn_req_get_rnr_retry_cnt( p_req );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * + Validates the path information provided in the REQ and stores the\r
- * associated CA attributes and port indeces.\r
- * + Transitions a connection object from active to passive in the peer case.\r
- * + Sets the path information in the connection and sets the CA GUID\r
- * in the REQ callback record.\r
- */\r
-static void\r
-__save_wire_req(\r
- IN OUT kcep_t* const p_cep,\r
- IN OUT mad_cm_req_t* const p_req )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_cep->state = CEP_STATE_REQ_RCVD;\r
- p_cep->was_active = FALSE;\r
-\r
- p_cep->sid = p_req->sid;\r
-\r
- /* Store pertinent information in the connection. */\r
- p_cep->remote_comm_id = p_req->local_comm_id;\r
- p_cep->remote_ca_guid = p_req->local_ca_guid;\r
-\r
- p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req );\r
- p_cep->local_qpn = 0;\r
-\r
- p_cep->retry_timeout =\r
- __calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) );\r
-\r
- /* Store the retry count. */\r
- p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req );\r
-\r
- /*\r
- * Copy the paths from the req_rec into the connection for\r
- * future use. Note that if the primary path is invalid,\r
- * the REP will fail.\r
- */\r
- __format_req_av( p_cep, p_req, 0 );\r
-\r
- if( p_req->alternate_path.local_lid )\r
- __format_req_av( p_cep, p_req, 1 );\r
- else\r
- cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
-\r
- p_cep->idx_primary = 0;\r
-\r
- /* Store the maximum packet lifetime, used to calculate timewait. */\r
- p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path );\r
- p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
- conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) );\r
-\r
- /*\r
- * Make sure the target ack delay is cleared - the above\r
- * "packet life" includes it.\r
- */\r
- p_cep->target_ack_delay = 0;\r
-\r
- /* Store the requested initiator depth. */\r
- p_cep->req_init_depth = conn_req_get_init_depth( p_req );\r
-\r
- /*\r
- * Store the provided responder resources. These turn into the local\r
- * QP's initiator depth.\r
- */\r
- p_cep->init_depth = conn_req_get_resp_res( p_req );\r
-\r
- p_cep->sq_psn = conn_req_get_starting_psn( p_req );\r
-\r
- p_cep->tid = p_req->hdr.trans_id;\r
- /* copy mad info for cm handoff */\r
- /* TODO: Do need to support CM handoff? */\r
- //p_cep->mads.req = *p_req;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/* Must be called with the CEP lock held. */\r
-static void\r
-__repeat_mad(\r
- IN cep_agent_t* const p_port_cep,\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_port_cep );\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_mad );\r
-\r
- /* Repeat the last mad sent for the connection. */\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */\r
- case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */\r
- case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */\r
- case CEP_STATE_ESTABLISHED: /* resend RTU */\r
- case CEP_STATE_TIMEWAIT: /* resend the DREP */\r
- cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE );\r
- p_mad->send_context1 = NULL;\r
- p_mad->send_context2 = NULL;\r
- __cep_send_mad( p_port_cep, p_mad );\r
- break;\r
-\r
- default:\r
- /* Return the MAD to the mad pool */\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__process_rej(\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rej_t *p_rej;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- ASSERT( p_cep );\r
- ASSERT( p_mad );\r
- ASSERT( p_mad->p_mad_buf );\r
-\r
- p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("Request rejected p_rej %p, reason - %d.\n", \r
- p_rej, cl_ntoh16(p_rej->reason) ) );\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_SENT:\r
- /*\r
- * Ignore rejects with the status set to IB_REJ_INVALID_SID. We will\r
- * continue to retry (up to max_cm_retries) to connect to the remote\r
- * side. This is required to support peer-to-peer connections.\r
- */\r
- if( p_cep->p2p && p_rej->reason == IB_REJ_INVALID_SID )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("Request rejected (invalid SID) - retrying.\n") );\r
- goto err1;\r
- }\r
-\r
- /* Fall through */\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REQ_MRA_RCVD:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- /* Cancel any outstanding MAD. */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- /* Fall through */\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_REP_MRA_SENT:\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- if( p_cep->state & CEP_STATE_PREP )\r
- {\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- }\r
- /* Abort connection establishment. No transition to timewait. */\r
- __remove_cep( p_cep );\r
- p_cep->state = CEP_STATE_IDLE;\r
- break;\r
-\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- case CEP_STATE_PRE_APR:\r
- case CEP_STATE_PRE_APR_MRA_SENT:\r
- if( p_cep->state & CEP_STATE_PREP )\r
- {\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- }\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
- __insert_timewait( p_cep );\r
- break;\r
-\r
- default:\r
- /* Ignore the REJ. */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REJ received in invalid state.\n") );\r
-err1:\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_NO_MATCH;\r
- }\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__process_stale(\r
- IN kcep_t* const p_cep )\r
-{\r
- ib_api_status_t status;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
- mad_cm_rej_t *p_rej;\r
-\r
- status = __cep_get_mad( p_cep, CM_REJ_ATTR_ID, &p_port_cep, &p_mad );\r
- if( status != IB_SUCCESS )\r
- return status;\r
-\r
- p_rej = ib_get_mad_buf( p_mad );\r
-\r
- conn_rej_set_ari( NULL, 0, p_rej );\r
- conn_rej_set_pdata( NULL, 0, p_rej );\r
-\r
- p_rej->local_comm_id = p_cep->remote_comm_id;\r
- p_rej->remote_comm_id = p_cep->local_comm_id;\r
- p_rej->reason = IB_REJ_STALE_CONN;\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- conn_rej_set_msg_rejected( 0, p_rej );\r
- break;\r
-\r
- case CEP_STATE_REQ_SENT:\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- conn_rej_set_msg_rejected( 1, p_rej );\r
- break;\r
-\r
- default:\r
- conn_rej_set_msg_rejected( 2, p_rej );\r
- break;\r
- }\r
- conn_rej_clr_rsvd_fields( p_rej );\r
-\r
- return __process_rej( p_cep, p_mad );\r
-}\r
-\r
-\r
-static void\r
-__req_handler(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status = IB_SUCCESS;\r
- mad_cm_req_t *p_req;\r
- kcep_t *p_cep, *p_new_cep, *p_stale_cep = NULL;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- ib_rej_status_t reason;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("REQ: comm_id (x%x) qpn (x%x) received\n",\r
- p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) );\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
-\r
- if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN )\r
- {\r
- /* Reserved value. Reject. */\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid transport type received.\n") );\r
- reason = IB_REJ_INVALID_XPORT;\r
- goto reject;\r
- }\r
-\r
- /* Match against pending connections using remote comm ID and CA GUID. */\r
- p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid );\r
- if( p_cep )\r
- {\r
- /* Already received the REQ. */\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_MRA_SENT:\r
- __repeat_mad( p_port_cep, p_cep, p_mad );\r
- break;\r
-\r
- case CEP_STATE_TIMEWAIT:\r
- case CEP_STATE_DESTROY:\r
- /* Send a reject. */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("REQ received for connection in TIME_WAIT state.\n") );\r
- __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN );\r
- break;\r
-\r
- default:\r
- /*\r
- * Let regular retries repeat the MAD. If our last message was\r
- * dropped, resending only adds to the congestion. If it wasn't\r
- * dropped, then the remote CM will eventually process it, and\r
- * we'd just be adding traffic.\r
- */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("Duplicate REQ received.\n") );\r
- ib_put_mad( p_mad );\r
- }\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- /*\r
- * Allocate a new CEP for the new request. This will\r
- * prevent multiple identical REQs from queueing up for processing.\r
- */\r
- p_new_cep = __create_cep();\r
- if( !p_new_cep )\r
- {\r
- /* Reject the request for insufficient resources. */\r
- reason = IB_REJ_INSUF_RESOURCES;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("al_create_cep failed\nREJ sent for insufficient resources.\n") );\r
- goto reject;\r
- }\r
-\r
- __save_wire_req( p_new_cep, p_req );\r
-\r
- /*\r
- * Match against listens using SID and compare data, also provide the receiving\r
- * MAD service's port GUID so we can properly filter.\r
- */\r
- p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata );\r
- if( p_cep )\r
- {\r
- __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL );\r
-\r
- /* Add the new CEP to the map so that repeated REQs match up. */\r
- p_stale_cep = __insert_cep( p_new_cep );\r
- if( p_stale_cep != p_new_cep )\r
- {\r
- /* Duplicate - must be a stale connection. */\r
- reason = IB_REJ_STALE_CONN;\r
- /* Fail the local stale CEP. */\r
- status = __process_stale( p_stale_cep );\r
- goto unbind;\r
- }\r
-\r
- /* __cep_queue_mad may complete a pending IRP */\r
- p_mad->send_context1 = p_new_cep; \r
-\r
- /*\r
- * Queue the mad - the return value indicates whether we should\r
- * invoke the callback.\r
- */\r
- status = __cep_queue_mad( p_cep, p_mad );\r
- switch( status )\r
- {\r
- case IB_SUCCESS:\r
- case IB_PENDING:\r
- break;\r
-\r
- case IB_UNSUPPORTED:\r
- p_mad->send_context1 = NULL;\r
- reason = IB_REJ_USER_DEFINED;\r
- goto unbind;\r
- \r
- default:\r
- p_mad->send_context1 = NULL;\r
- reason = IB_REJ_INSUF_RESOURCES;\r
- goto unbind;\r
- }\r
- }\r
- else\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("No listens active!\n") );\r
-\r
- /* Match against peer-to-peer requests using SID and compare data. */\r
- //p_cep = __lookup_peer();\r
- //if( p_cep )\r
- //{\r
- // p_mad->send_context2 = NULL;\r
- // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list,\r
- // __match_peer, p_req );\r
- // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) )\r
- // {\r
- // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item );\r
- // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad );\r
- // cl_free( p_async_mad );\r
- // AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- // ("REQ matched a peer-to-peer request.\n") );\r
- // return;\r
- // }\r
- // reason = IB_REJ_INVALID_SID;\r
- // goto free;\r
- //}\r
- //else\r
- {\r
- /* No match found. Reject. */\r
- reason = IB_REJ_INVALID_SID;\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REQ received but no match found.\n") );\r
- goto cleanup;\r
- }\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- /* Process any queued MADs for the CEP. */\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
-unbind:\r
- __unbind_cep( p_new_cep );\r
-\r
-cleanup:\r
- /*\r
- * Move the CEP in the idle state so that we don't send a reject\r
- * for it when cleaning up. Also clear the RQPN and RCID so that\r
- * we don't try to remove it from our maps (since it isn't inserted).\r
- */\r
- p_new_cep->state = CEP_STATE_IDLE;\r
- p_new_cep->remote_comm_id = 0;\r
- p_new_cep->remote_qpn = 0;\r
- __cleanup_cep( p_new_cep );\r
-\r
-reject:\r
- __reject_req( p_port_cep, p_mad, reason );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( reason == IB_REJ_STALE_CONN && status == IB_SUCCESS )\r
- __process_cep( p_stale_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__save_wire_rep(\r
- IN OUT kcep_t* const p_cep,\r
- IN const mad_cm_rep_t* const p_rep )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* The send should have been cancelled during MRA processing. */\r
- p_cep->state = CEP_STATE_REP_RCVD;\r
-\r
- /* Store pertinent information in the connection. */\r
- p_cep->remote_comm_id = p_rep->local_comm_id;\r
- p_cep->remote_ca_guid = p_rep->local_ca_guid;\r
-\r
- p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep );\r
-\r
- /* Store the remote endpoint's target ACK delay. */\r
- p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep );\r
-\r
- /* Update the local ACK delay stored in the AV's. */\r
- p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
- p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay );\r
- p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep );\r
-\r
- if( p_cep->av[1].port_guid )\r
- {\r
- p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
- p_cep->av[1].attr.conn.local_ack_timeout,\r
- p_cep->target_ack_delay );\r
- p_cep->av[1].attr.conn.rnr_retry_cnt =\r
- p_cep->av[0].attr.conn.rnr_retry_cnt;\r
- }\r
-\r
- p_cep->init_depth = p_rep->resp_resources;\r
- p_cep->resp_res = p_rep->initiator_depth;\r
-\r
- p_cep->sq_psn = conn_rep_get_starting_psn( p_rep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__mra_handler(\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_mra_t *p_mra;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_mra->remote_comm_id );\r
- if( !p_cep )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("MRA received that could not be matched.\n") );\r
- goto err;\r
- }\r
-\r
- if( p_cep->remote_comm_id )\r
- {\r
- if( p_cep->remote_comm_id != p_mra->local_comm_id )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("MRA received that could not be matched.\n") );\r
- goto err;\r
- }\r
- }\r
-\r
- /*\r
- * Note that we don't update the CEP's remote comm ID - it messes up REP\r
- * processing since a non-zero RCID implies the connection is in the RCID\r
- * map. Adding it here requires checking there and conditionally adding\r
- * it. Ignoring it is a valid thing to do.\r
- */\r
- if( !(p_cep->state & CEP_STATE_SENT) ||\r
- (1 << conn_mra_get_msg_mraed( p_mra ) !=\r
- (p_cep->state & CEP_MSG_MASK)) )\r
- {\r
- /* Invalid state. */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("MRA received in invalid state.\n") );\r
- goto err;\r
- }\r
-\r
- /* Delay the current send. */\r
- CL_ASSERT( p_cep->p_send_mad );\r
- ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad,\r
- __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) +\r
- __calc_mad_timeout( p_cep->max_2pkt_life - 1 ) );\r
-\r
- /* We only invoke a single callback for MRA. */\r
- if( p_cep->state & CEP_STATE_MRA )\r
- {\r
- /* Invalid state. */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("Already received MRA.\n") );\r
- goto err;\r
- }\r
-\r
- p_cep->state |= CEP_STATE_MRA;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
-err:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__rej_handler(\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rej_t *p_rej;\r
- kcep_t *p_cep = NULL;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- net64_t ca_guid;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
-\r
- /* Either one of the communication IDs must be set. */\r
- if( !p_rej->remote_comm_id && !p_rej->local_comm_id )\r
- goto err1;\r
-\r
- /* Check the pending list by the remote CA GUID and connection ID. */\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- if( p_rej->remote_comm_id )\r
- {\r
- p_cep = __lookup_cep( NULL, p_rej->remote_comm_id );\r
- }\r
- else if( p_rej->reason == IB_REJ_TIMEOUT &&\r
- conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) )\r
- {\r
- cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) );\r
- p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid );\r
- }\r
-\r
- if( !p_cep )\r
- {\r
- goto err2;\r
- }\r
-\r
- if( p_cep->remote_comm_id &&\r
- p_cep->remote_comm_id != p_rej->local_comm_id )\r
- {\r
- err2:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- err1:\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- status = __process_rej( p_cep, p_mad );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__rep_handler(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rep_t *p_rep;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_state_t old_state;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) );\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_rep->remote_comm_id );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("REP received that could not be matched.\n") );\r
- return;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_MRA_RCVD:\r
- case CEP_STATE_REQ_SENT:\r
- old_state = p_cep->state;\r
- /* Save pertinent information and change state. */\r
- __save_wire_rep( p_cep, p_rep );\r
-\r
- if( __insert_cep( p_cep ) != p_cep )\r
- {\r
- /* Roll back the state change. */\r
- __reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN );\r
- p_cep->state = old_state;\r
- status = __process_stale( p_cep );\r
- }\r
- else\r
- {\r
- /*\r
- * Cancel any outstanding send. Note that we do this only after\r
- * inserting the CEP - if we failed, then the send will timeout\r
- * and we'll finish our way through the state machine.\r
- */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- case CEP_STATE_REP_MRA_SENT:\r
- /* Repeate the MRA or RTU. */\r
- __repeat_mad( p_port_cep, p_cep, p_mad );\r
- break;\r
-\r
- default:\r
- ib_put_mad( p_mad );\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REP received in invalid state.\n") );\r
- break;\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__rtu_handler(\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rtu_t *p_rtu;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) );\r
-\r
- /* Find the connection by local connection ID. */\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id );\r
- if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("RTU received that could not be matched.\n") );\r
- goto done;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- /* Cancel any outstanding send. */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- p_cep->state = CEP_STATE_ESTABLISHED;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- /* Update timewait time. */\r
- __calc_timewait( p_cep );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("RTU received in invalid state.\n") );\r
- break;\r
- }\r
-\r
-done:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__dreq_handler(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_dreq_t *p_dreq;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("DREQ: comm_id (x%x) qpn (x%x) received\n",\r
- p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) );\r
-\r
- /* Find the connection by connection IDs. */\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id );\r
- if( !p_cep ||\r
- p_cep->remote_comm_id != p_dreq->local_comm_id ||\r
- p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREQ received that could not be matched.\n") );\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- case CEP_STATE_DREQ_SENT:\r
- /* Cancel the outstanding MAD. */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- /* Fall through and process as DREQ received case. */\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- p_cep->state = CEP_STATE_DREQ_RCVD;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- /* Store the TID for use in the reply DREP. */\r
- p_cep->tid = p_dreq->hdr.trans_id;\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
- case CEP_STATE_TIMEWAIT:\r
- case CEP_STATE_DESTROY:\r
- /* Repeat the DREP. */\r
- __repeat_mad( p_port_cep, p_cep, p_mad );\r
- break;\r
-\r
- case CEP_STATE_DREQ_DESTROY:\r
- /* Send the DREP with no private data. */\r
-\r
- ib_put_mad( p_mad ); /* release DREQ MAD */\r
-\r
- status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &(cep_agent_t*)p_port_cep, \r
- &(ib_mad_element_t*)p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- p_mad->p_mad_buf->attr_id = CM_DREP_ATTR_ID;\r
- /* __format_drep returns always SUCCESS while no private data */\r
- __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf );\r
- __cep_send_mad( p_port_cep, p_mad );\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREQ received in invalid state.\n") );\r
- case CEP_STATE_DREQ_RCVD:\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__drep_handler(\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_drep_t *p_drep;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf;\r
-\r
- /* Find the connection by local connection ID. */\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_drep->remote_comm_id );\r
- if( !p_cep || p_cep->remote_comm_id != p_drep->local_comm_id )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREP received that could not be matched.\n") );\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- if( p_cep->state != CEP_STATE_DREQ_SENT &&\r
- p_cep->state != CEP_STATE_DREQ_DESTROY )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREP received in invalid state.\n") );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- /* Cancel the DREQ. */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- if( p_cep->state == CEP_STATE_DREQ_SENT )\r
- {\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
- }\r
- else\r
- {\r
- /* State is DREQ_DESTROY - move to DESTROY to allow cleanup. */\r
- CL_ASSERT( p_cep->state == CEP_STATE_DREQ_DESTROY );\r
- p_cep->state = CEP_STATE_DESTROY;\r
-\r
- ib_put_mad( p_mad );\r
- status = IB_INVALID_STATE;\r
- }\r
-\r
- __insert_timewait( p_cep );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static boolean_t\r
-__format_lap_av(\r
- IN kcep_t* const p_cep,\r
- IN const lap_path_info_t* const p_path )\r
-{\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_path );\r
-\r
- cl_memclr( &p_cep->alt_av, sizeof(kcep_av_t) );\r
-\r
- p_port_cep = __find_port_cep( &p_path->remote_gid, p_path->remote_lid,\r
- p_cep->pkey, &p_cep->alt_av.pkey_index );\r
- if( !p_port_cep )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return FALSE;\r
- }\r
-\r
- if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return FALSE;\r
- }\r
-\r
- p_cep->alt_av.port_guid = p_port_cep->port_guid;\r
- p_cep->alt_av.attr.port_num = p_port_cep->port_num;\r
-\r
- p_cep->alt_av.attr.sl = conn_lap_path_get_svc_lvl( p_path );\r
- p_cep->alt_av.attr.dlid = p_path->local_lid;\r
-\r
- if( !conn_lap_path_get_subn_lcl( p_path ) )\r
- {\r
- p_cep->alt_av.attr.grh_valid = TRUE;\r
- p_cep->alt_av.attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
- 1, conn_lap_path_get_tclass( p_path ),\r
- conn_lap_path_get_flow_lbl( p_path ) );\r
- p_cep->alt_av.attr.grh.hop_limit = p_path->hop_limit;\r
- p_cep->alt_av.attr.grh.dest_gid = p_path->local_gid;\r
- p_cep->alt_av.attr.grh.src_gid = p_path->remote_gid;\r
- }\r
- else\r
- {\r
- p_cep->alt_av.attr.grh_valid = FALSE;\r
- }\r
- p_cep->alt_av.attr.static_rate = conn_lap_path_get_pkt_rate( p_path );\r
- p_cep->alt_av.attr.path_bits =\r
- (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);\r
-\r
- /*\r
- * Note that while we never use the connected AV attributes internally,\r
- * we store them so we can pass them back to users. For the LAP, we\r
- * first copy the settings from the current primary - MTU and retry\r
- * counts are only specified in the REQ.\r
- */\r
- p_cep->alt_av.attr.conn = p_cep->av[p_cep->idx_primary].attr.conn;\r
- p_cep->alt_av.attr.conn.local_ack_timeout =\r
- conn_lap_path_get_lcl_ack_timeout( p_path );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return TRUE;\r
-}\r
-\r
-\r
-static void\r
-__lap_handler(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_lap_t *p_lap;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;\r
-\r
- /* Find the connection by local connection ID. */\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_lap->remote_comm_id );\r
- if( !p_cep || p_cep->remote_comm_id != p_lap->local_comm_id )\r
- {\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("LAP received that could not be matched.\n") );\r
- return;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- /*\r
- * These two cases handle the RTU being dropped. Receipt of\r
- * a LAP indicates that the connection is established.\r
- */\r
- case CEP_STATE_ESTABLISHED:\r
- /*\r
- * We don't check for other "established" states related to\r
- * alternate path management (CEP_STATE_LAP_RCVD, etc)\r
- */\r
-\r
- /* We only support receiving LAP if we took the passive role. */\r
- if( p_cep->was_active )\r
- {\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- /* Store the transaction ID for use during the LAP exchange. */\r
- p_cep->tid = p_lap->hdr.trans_id;\r
-\r
- /*\r
- * Copy the path record into the connection for use when\r
- * sending the APR and loading the path.\r
- */\r
- if( !__format_lap_av( p_cep, &p_lap->alternate_path ) )\r
- {\r
- /* Trap an invalid path. */\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- p_cep->state = CEP_STATE_LAP_RCVD;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
- case CEP_STATE_LAP_MRA_SENT:\r
- __repeat_mad( p_port_cep, p_cep, p_mad );\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("LAP received in invalid state.\n") );\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__apr_handler(\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_apr_t *p_apr;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( NULL, p_apr->remote_comm_id );\r
- if( !p_cep || p_cep->remote_comm_id != p_apr->local_comm_id )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("APR received that could not be matched.\n") );\r
- goto done;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- /* Cancel sending the LAP. */\r
- if( p_cep->p_send_mad )\r
- {\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- }\r
-\r
- /* Copy the temporary alternate AV. */\r
- p_cep->av[(p_cep->idx_primary + 1) & 0x1] = p_cep->alt_av;\r
-\r
- /* Update the maximum packet lifetime. */\r
- p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );\r
-\r
- /* Update the timewait time. */\r
- __calc_timewait( p_cep );\r
-\r
- p_cep->state = CEP_STATE_ESTABLISHED;\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("APR received in invalid state.\n") );\r
- break;\r
- }\r
-\r
-done:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__cep_mad_recv_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *context,\r
- IN ib_mad_element_t *p_mad )\r
-{\r
- cep_agent_t *p_port_cep;\r
- ib_mad_t *p_hdr;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- UNUSED_PARAM( h_mad_svc );\r
- p_port_cep = (cep_agent_t*)context;\r
-\r
- CL_ASSERT( p_mad->p_next == NULL );\r
-\r
- p_hdr = (ib_mad_t*)p_mad->p_mad_buf;\r
-\r
- /*\r
- * TODO: Add filtering in all the handlers for unsupported class version.\r
- * See 12.6.7.2 Rejection Reason, code 31.\r
- */\r
-\r
- switch( p_hdr->attr_id )\r
- {\r
- case CM_REQ_ATTR_ID:\r
- __req_handler( p_port_cep, p_mad );\r
- break;\r
-\r
- case CM_MRA_ATTR_ID:\r
- __mra_handler( p_mad );\r
- break;\r
-\r
- case CM_REJ_ATTR_ID:\r
- __rej_handler( p_mad );\r
- break;\r
-\r
- case CM_REP_ATTR_ID:\r
- __rep_handler( p_port_cep, p_mad );\r
- break;\r
-\r
- case CM_RTU_ATTR_ID:\r
- __rtu_handler( p_mad );\r
- break;\r
-\r
- case CM_DREQ_ATTR_ID:\r
- __dreq_handler( p_port_cep, p_mad );\r
- break;\r
-\r
- case CM_DREP_ATTR_ID:\r
- __drep_handler( p_mad );\r
- break;\r
-\r
- case CM_LAP_ATTR_ID:\r
- __lap_handler( p_port_cep, p_mad );\r
- break;\r
-\r
- case CM_APR_ATTR_ID:\r
- __apr_handler( p_mad );\r
- break;\r
-\r
- case CM_SIDR_REQ_ATTR_ID:\r
-// p_async_mad->item.pfn_callback = __process_cm_sidr_req;\r
-// break;\r
-//\r
- case CM_SIDR_REP_ATTR_ID:\r
-// p_async_mad->item.pfn_callback = __process_cm_sidr_rep;\r
-// break;\r
-//\r
- default:\r
- ib_put_mad( p_mad );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid CM MAD attribute ID.\n") );\r
- return;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static inline cep_agent_t*\r
-__get_cep_agent(\r
- IN kcep_t* const p_cep )\r
-{\r
- cl_map_item_t *p_item;\r
-\r
- CL_ASSERT( p_cep );\r
-\r
- /* Look up the primary CEP port agent */\r
- p_item = cl_qmap_get( &gp_cep_mgr->port_map,\r
- p_cep->av[p_cep->idx_primary].port_guid );\r
- if( p_item == cl_qmap_end( &gp_cep_mgr->port_map ) )\r
- return NULL;\r
-\r
- return PARENT_STRUCT( p_item, cep_agent_t, item );\r
-}\r
-\r
-\r
-static inline void\r
-__format_mad_av(\r
- OUT ib_mad_element_t* const p_mad,\r
- IN kcep_av_t* const p_av )\r
-{\r
- /* Set the addressing information in the MAD. */\r
- p_mad->grh_valid = p_av->attr.grh_valid;\r
- if( p_av->attr.grh_valid )\r
- cl_memcpy( p_mad->p_grh, &p_av->attr.grh, sizeof(ib_grh_t) );\r
-\r
- p_mad->remote_sl = p_av->attr.sl;\r
- p_mad->remote_lid = p_av->attr.dlid;\r
- p_mad->path_bits = p_av->attr.path_bits;\r
- p_mad->pkey_index = p_av->pkey_index;\r
- p_mad->remote_qp = IB_QP1;\r
- p_mad->send_opt = IB_SEND_OPT_SIGNALED;\r
- p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;\r
- /* Let the MAD service manage the AV for us. */\r
- p_mad->h_av = NULL;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__cep_send_mad(\r
- IN cep_agent_t* const p_port_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_port_cep );\r
- CL_ASSERT( p_mad );\r
-\r
- /* Use the mad's attributes already present */\r
- p_mad->resp_expected = FALSE;\r
- p_mad->retry_cnt = 0;\r
- p_mad->timeout_ms = 0;\r
-\r
- /* Clear the contexts since the send isn't associated with a CEP. */\r
- p_mad->context1 = NULL;\r
- p_mad->context2 = NULL;\r
-\r
- status = ib_send_mad( p_port_cep->h_mad_svc, p_mad, NULL );\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_mad );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__cep_send_retry(\r
- IN cep_agent_t* const p_port_cep,\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_mad );\r
- CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||\r
- p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID ||\r
- p_mad->p_mad_buf->attr_id == CM_LAP_ATTR_ID ||\r
- p_mad->p_mad_buf->attr_id == CM_DREQ_ATTR_ID );\r
-\r
- /*\r
- * REQ, REP, and DREQ are retried until either a response is\r
- * received or the operation times out.\r
- */\r
- p_mad->resp_expected = TRUE;\r
- p_mad->retry_cnt = p_cep->max_cm_retries;\r
- p_mad->timeout_ms = p_cep->retry_timeout;\r
-\r
- CL_ASSERT( !p_cep->p_send_mad );\r
-\r
- /* Store the mad & mad service handle in the CEP for cancelling. */\r
- p_cep->h_mad_svc = p_port_cep->h_mad_svc;\r
- p_cep->p_send_mad = p_mad;\r
-\r
- /* reference the connection for which we are sending the MAD. */\r
- cl_atomic_inc( &p_cep->ref_cnt );\r
-\r
- /* Set the context. */\r
- p_mad->context1 = p_cep;\r
- p_mad->context2 = NULL;\r
-\r
- /* Fire in the hole! */\r
- status = ib_send_mad( p_cep->h_mad_svc, p_mad, NULL );\r
- if( status != IB_SUCCESS )\r
- {\r
- /*\r
- * Note that we don't need to check for destruction here since\r
- * we're holding the global lock.\r
- */\r
- cl_atomic_dec( &p_cep->ref_cnt );\r
- p_cep->p_send_mad = NULL;\r
- ib_put_mad( p_mad );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static void\r
-__cep_mad_send_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *context,\r
- IN ib_mad_element_t *p_mad )\r
-{\r
- ib_api_status_t status;\r
- cep_agent_t *p_port_cep;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- ib_pfn_destroy_cb_t pfn_destroy_cb;\r
- void *cep_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( h_mad_svc );\r
- CL_ASSERT( p_mad->p_next == NULL );\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- p_port_cep = (cep_agent_t*)context;\r
-\r
- p_cep = (kcep_t* __ptr64)p_mad->context1;\r
-\r
- /*\r
- * The connection context is not set when performing immediate responses,\r
- * such as repeating MADS.\r
- */\r
- if( !p_cep )\r
- {\r
- ib_put_mad( p_mad );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- p_mad->context1 = NULL;\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
- /* Clear the sent MAD pointer so that we don't try cancelling again. */\r
- if( p_cep->p_send_mad == p_mad )\r
- p_cep->p_send_mad = NULL;\r
-\r
- switch( p_mad->status )\r
- {\r
- case IB_WCS_SUCCESS:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- break;\r
-\r
- case IB_WCS_CANCELED:\r
- if( p_cep->state != CEP_STATE_REQ_SENT &&\r
- p_cep->state != CEP_STATE_REQ_MRA_RCVD &&\r
- p_cep->state != CEP_STATE_REP_SENT &&\r
- p_cep->state != CEP_STATE_REP_MRA_RCVD &&\r
- p_cep->state != CEP_STATE_LAP_SENT &&\r
- p_cep->state != CEP_STATE_LAP_MRA_RCVD &&\r
- p_cep->state != CEP_STATE_DREQ_SENT &&\r
- p_cep->state != CEP_STATE_SREQ_SENT )\r
- {\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
- /* Treat as a timeout so we don't stall the state machine. */\r
- p_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;\r
-\r
- /* Fall through. */\r
- case IB_WCS_TIMEOUT_RETRY_ERR:\r
- default:\r
- /* Timeout. Reject the connection. */\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_SENT:\r
- case CEP_STATE_REQ_MRA_RCVD:\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- /* Send the REJ. */\r
- __reject_timeout( p_port_cep, p_cep, p_mad );\r
- __remove_cep( p_cep );\r
- p_cep->state = CEP_STATE_IDLE;\r
- break;\r
-\r
- case CEP_STATE_DREQ_DESTROY:\r
- p_cep->state = CEP_STATE_DESTROY;\r
- __insert_timewait( p_cep );\r
- /* Fall through. */\r
-\r
- case CEP_STATE_DESTROY:\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
- ib_put_mad( p_mad );\r
- goto done;\r
-\r
- case CEP_STATE_DREQ_SENT:\r
- /*\r
- * Make up a DREP mad so we can respond if we receive\r
- * a DREQ while in timewait.\r
- */\r
- __format_mad_hdr( &p_cep->mads.drep.hdr, p_cep, CM_DREP_ATTR_ID );\r
- __format_drep( p_cep, NULL, 0, &p_cep->mads.drep );\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
- __insert_timewait( p_cep );\r
-\r
- default:\r
- break;\r
- }\r
-\r
- status = __cep_queue_mad( p_cep, p_mad );\r
- CL_ASSERT( status != IB_INVALID_STATE );\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- if( status == IB_SUCCESS )\r
- __process_cep( p_cep );\r
- break;\r
- }\r
-\r
-done:\r
- pfn_destroy_cb = p_cep->pfn_destroy_cb;\r
- cep_context = p_cep->context;\r
-\r
- if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )\r
- pfn_destroy_cb( cep_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static void\r
-__cep_qp_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec )\r
-{\r
- UNUSED_PARAM( p_event_rec );\r
-\r
- /*\r
- * Most of the QP events are trapped by the real owner of the QP.\r
- * For real events, the CM may not be able to do much anyways!\r
- */\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__init_data_svc(\r
- IN cep_agent_t* const p_port_cep,\r
- IN const ib_port_attr_t* const p_port_attr )\r
-{\r
- ib_api_status_t status;\r
- ib_qp_create_t qp_create;\r
- ib_mad_svc_t mad_svc;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /*\r
- * Create the PD alias. We use the port CM's al_obj_t as the context\r
- * to allow using deref_al_obj as the destroy callback.\r
- */\r
- status = ib_alloc_pd( p_port_cep->h_ca, IB_PDT_ALIAS, &p_port_cep->obj,\r
- &p_port_cep->h_pd );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_alloc_pd failed with status %s\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
- /* Reference the port object on behalf of the PD. */\r
- ref_al_obj( &p_port_cep->obj );\r
-\r
- /* Create the MAD QP. */\r
- cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
- qp_create.qp_type = IB_QPT_QP1_ALIAS;\r
- qp_create.rq_depth = CEP_MAD_RQ_DEPTH;\r
- qp_create.sq_depth = CEP_MAD_SQ_DEPTH;\r
- qp_create.rq_sge = CEP_MAD_RQ_SGE;\r
- qp_create.sq_sge = CEP_MAD_SQ_SGE;\r
- qp_create.sq_signaled = TRUE;\r
- /*\r
- * We use the port CM's al_obj_t as the context to allow using\r
- * deref_al_obj as the destroy callback.\r
- */\r
- status = ib_get_spl_qp( p_port_cep->h_pd, p_port_attr->port_guid,\r
- &qp_create, &p_port_cep->obj, __cep_qp_event_cb, &p_port_cep->pool_key,\r
- &p_port_cep->h_qp );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_spl_qp failed with status %s\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
- /* Reference the port object on behalf of the QP. */\r
- ref_al_obj( &p_port_cep->obj );\r
-\r
- /* Create the MAD service. */\r
- cl_memclr( &mad_svc, sizeof(mad_svc) );\r
- mad_svc.mad_svc_context = p_port_cep;\r
- mad_svc.pfn_mad_recv_cb = __cep_mad_recv_cb;\r
- mad_svc.pfn_mad_send_cb = __cep_mad_send_cb;\r
- mad_svc.support_unsol = TRUE;\r
- mad_svc.mgmt_class = IB_MCLASS_COMM_MGMT;\r
- mad_svc.mgmt_version = IB_MCLASS_CM_VER_2;\r
- mad_svc.method_array[IB_MAD_METHOD_SEND] = TRUE;\r
- status =\r
- ib_reg_mad_svc( p_port_cep->h_qp, &mad_svc, &p_port_cep->h_mad_svc );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_reg_mad_svc failed with status %s\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Performs immediate cleanup of resources.\r
- */\r
-static void\r
-__destroying_port_cep(\r
- IN al_obj_t *p_obj )\r
-{\r
- cep_agent_t *p_port_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );\r
-\r
- if( p_port_cep->port_guid )\r
- {\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- cl_qmap_remove_item( &gp_cep_mgr->port_map, &p_port_cep->item );\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- }\r
-\r
- if( p_port_cep->h_qp )\r
- {\r
- ib_destroy_qp( p_port_cep->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj );\r
- p_port_cep->h_qp = NULL;\r
- }\r
-\r
- if( p_port_cep->h_pd )\r
- {\r
- ib_dealloc_pd( p_port_cep->h_pd, (ib_pfn_destroy_cb_t)deref_al_obj );\r
- p_port_cep->h_pd = NULL;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Release all resources allocated by a port CM agent. Finishes any cleanup\r
- * for a port agent.\r
- */\r
-static void\r
-__free_port_cep(\r
- IN al_obj_t *p_obj )\r
-{\r
- cep_agent_t *p_port_cep;\r
- ib_port_attr_mod_t port_attr_mod;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj );\r
-\r
- if( p_port_cep->h_ca )\r
- {\r
- /* Update local port attributes */\r
- port_attr_mod.cap.cm = FALSE;\r
- ib_modify_ca( p_port_cep->h_ca, p_port_cep->port_num,\r
- IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );\r
-\r
- deref_al_obj( &p_port_cep->h_ca->obj );\r
- }\r
-\r
- destroy_al_obj( &p_port_cep->obj );\r
- cl_free( p_port_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Create a port agent for a given port.\r
- */\r
-static ib_api_status_t\r
-__create_port_cep(\r
- IN ib_pnp_port_rec_t *p_pnp_rec )\r
-{\r
- cep_agent_t *p_port_cep;\r
- ib_api_status_t status;\r
- ib_port_attr_mod_t port_attr_mod;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* calculate size of port_cm struct */\r
- p_port_cep = (cep_agent_t*)cl_zalloc( sizeof(cep_agent_t) );\r
- if( !p_port_cep )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to cl_zalloc port CM agent.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- construct_al_obj( &p_port_cep->obj, AL_OBJ_TYPE_CM );\r
-\r
- status = init_al_obj( &p_port_cep->obj, p_port_cep, TRUE,\r
- __destroying_port_cep, NULL, __free_port_cep );\r
- if( status != IB_SUCCESS )\r
- {\r
- __free_port_cep( &p_port_cep->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Attach to the global CM object. */\r
- status = attach_al_obj( &gp_cep_mgr->obj, &p_port_cep->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- p_port_cep->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
- p_port_cep->port_num = p_pnp_rec->p_port_attr->port_num;\r
- p_port_cep->base_lid = p_pnp_rec->p_port_attr->lid;\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- cl_qmap_insert(\r
- &gp_cep_mgr->port_map, p_port_cep->port_guid, &p_port_cep->item );\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- /* Get a reference to the CA on which we are loading. */\r
- p_port_cep->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
- if( !p_port_cep->h_ca )\r
- {\r
- p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
- return IB_INVALID_GUID; }\r
-\r
- status = __init_data_svc( p_port_cep, p_pnp_rec->p_port_attr );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("__init_data_svc failed with status %s.\n",\r
- ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Update local port attributes */\r
- cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) );\r
- port_attr_mod.cap.cm = TRUE;\r
- status = ib_modify_ca( p_port_cep->h_ca, p_pnp_rec->p_port_attr->port_num,\r
- IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );\r
-\r
- /* Update the PNP context to reference this port. */\r
- p_pnp_rec->pnp_rec.context = p_port_cep;\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_port_cep->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-/******************************************************************************\r
-* Global CEP manager\r
-******************************************************************************/\r
-\r
-static cep_cid_t*\r
-__get_lcid(\r
- OUT net32_t* const p_cid )\r
-{\r
- cl_status_t status;\r
- uint32_t size, cid;\r
- cep_cid_t *p_cep_cid;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- size = (uint32_t)cl_vector_get_size( &gp_cep_mgr->cid_vector );\r
- cid = gp_cep_mgr->free_cid;\r
- if( gp_cep_mgr->free_cid == size )\r
- {\r
- /* Grow the vector pool. */\r
- status =\r
- cl_vector_set_size( &gp_cep_mgr->cid_vector, size + CEP_CID_GROW );\r
- if( status != CL_SUCCESS )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return NULL;\r
- }\r
- /*\r
- * Return the the start of the free list since the\r
- * entry initializer incremented it.\r
- */\r
- gp_cep_mgr->free_cid = size;\r
- }\r
-\r
- /* Get the next free entry. */\r
- p_cep_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );\r
-\r
- /* Update the next entry index. */\r
- gp_cep_mgr->free_cid = (uint32_t)(uintn_t)p_cep_cid->p_cep;\r
-\r
- *p_cid = cid;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep_cid;\r
-}\r
-\r
-\r
-static inline kcep_t*\r
-__lookup_cep(\r
- IN ib_al_handle_t h_al OPTIONAL,\r
- IN net32_t cid )\r
-{\r
- size_t idx;\r
- cep_cid_t *p_cid;\r
-\r
- /* Mask off the counter bits so we get the index in our vector. */\r
- idx = cid & CEP_MAX_CID_MASK;\r
-\r
- if( idx >= cl_vector_get_size( &gp_cep_mgr->cid_vector ) )\r
- return NULL;\r
-\r
- p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, idx );\r
- if( !p_cid->h_al )\r
- return NULL;\r
-\r
- /*\r
- * h_al is NULL when processing MADs, so we need to match on\r
- * the actual local communication ID. If h_al is non-NULL, we\r
- * are doing a lookup from a call to our API, and only need to match\r
- * on the index in the vector (without the modifier).\r
- */\r
- if( h_al )\r
- {\r
- if( p_cid->h_al != h_al )\r
- return NULL;\r
- }\r
- else if( p_cid->p_cep->local_comm_id != cid )\r
- {\r
- return NULL;\r
- }\r
-\r
- return p_cid->p_cep;\r
-}\r
-\r
-\r
-/*\r
- * Lookup a CEP by remote comm ID and CA GUID.\r
- */\r
-static kcep_t*\r
-__lookup_by_id(\r
- IN net32_t remote_comm_id,\r
- IN net64_t remote_ca_guid )\r
-{\r
- cl_rbmap_item_t *p_item;\r
- kcep_t *p_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Match against pending connections using remote comm ID and CA GUID. */\r
- p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );\r
- while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )\r
- {\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );\r
-\r
- if( remote_comm_id < p_cep->remote_comm_id )\r
- p_item = cl_rbmap_left( p_item );\r
- else if( remote_comm_id > p_cep->remote_comm_id )\r
- p_item = cl_rbmap_right( p_item );\r
- else if( remote_ca_guid < p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_left( p_item );\r
- else if( remote_ca_guid > p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_right( p_item );\r
- else\r
- return p_cep;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return NULL;\r
-}\r
-\r
-\r
-/*\r
- * Lookup a CEP by Service ID and private data.\r
- */\r
-static kcep_t*\r
-__lookup_listen(\r
- IN net64_t sid,\r
- IN net64_t port_guid,\r
- IN uint8_t *p_pdata )\r
-{\r
- cl_rbmap_item_t *p_item;\r
- kcep_t *p_cep;\r
- intn_t cmp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Match against pending connections using remote comm ID and CA GUID. */\r
- p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );\r
- while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )\r
- {\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, listen_item );\r
-\r
- if( sid == p_cep->sid )\r
- goto port_cmp;\r
- else if( sid < p_cep->sid )\r
- p_item = cl_rbmap_left( p_item );\r
- else\r
- p_item = cl_rbmap_right( p_item );\r
-\r
- continue;\r
-\r
-port_cmp:\r
- if( p_cep->port_guid != IB_ALL_PORTS )\r
- {\r
- if( port_guid == p_cep->port_guid )\r
- goto pdata_cmp;\r
- else if( port_guid < p_cep->port_guid )\r
- p_item = cl_rbmap_left( p_item );\r
- else\r
- p_item = cl_rbmap_right( p_item );\r
-\r
- continue;\r
- }\r
-\r
-pdata_cmp:\r
- if( p_cep->p_cmp_buf && p_pdata )\r
- {\r
- cmp = cl_memcmp( &p_pdata[p_cep->cmp_offset],\r
- p_cep->p_cmp_buf, p_cep->cmp_len );\r
-\r
- if( !cmp )\r
- goto match;\r
- else if( cmp < 0 )\r
- p_item = cl_rbmap_left( p_item );\r
- else\r
- p_item = cl_rbmap_right( p_item );\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("Svc ID match but compare buffer mismatch.\n") );\r
- continue;\r
- }\r
-\r
-match:\r
- /* Everything matched. */\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return NULL;\r
-}\r
-\r
-\r
-static kcep_t*\r
-__insert_by_id(\r
- IN kcep_t* const p_new_cep )\r
-{\r
- kcep_t *p_cep;\r
- cl_rbmap_item_t *p_item, *p_insert_at;\r
- boolean_t left = TRUE;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );\r
- p_insert_at = p_item;\r
- while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )\r
- {\r
- p_insert_at = p_item;\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );\r
-\r
- if( p_new_cep->remote_comm_id < p_cep->remote_comm_id )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else if( p_new_cep->remote_comm_id > p_cep->remote_comm_id )\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
- else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
- else\r
- {\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("WARNING: Duplicate remote CID and CA GUID.\n") );\r
- goto done;\r
- }\r
- }\r
-\r
- cl_rbmap_insert(\r
- &gp_cep_mgr->conn_id_map, p_insert_at, &p_new_cep->rem_id_item, left );\r
- p_cep = p_new_cep;\r
-\r
-done:\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep;\r
-}\r
-\r
-\r
-static kcep_t*\r
-__insert_by_qpn(\r
- IN kcep_t* const p_new_cep )\r
-{\r
- kcep_t *p_cep;\r
- cl_rbmap_item_t *p_item, *p_insert_at;\r
- boolean_t left = TRUE;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_item = cl_rbmap_root( &gp_cep_mgr->conn_qp_map );\r
- p_insert_at = p_item;\r
- while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_qp_map ) )\r
- {\r
- p_insert_at = p_item;\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );\r
-\r
- if( p_new_cep->remote_qpn < p_cep->remote_qpn )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else if( p_new_cep->remote_qpn > p_cep->remote_qpn )\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
- else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
- else\r
- {\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("WARNING: Duplicate remote QPN and CA GUID.\n") );\r
- goto done;\r
- }\r
- }\r
-\r
- cl_rbmap_insert(\r
- &gp_cep_mgr->conn_qp_map, p_insert_at, &p_new_cep->rem_qp_item, left );\r
- p_cep = p_new_cep;\r
-\r
-done:\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep;\r
-}\r
-\r
-\r
-/*\r
- * Insert a CEP into both connection maps (by remote QPN, then by remote\r
- * comm ID).  Returns p_new_cep on success; on a duplicate, returns the\r
- * existing CEP and leaves p_new_cep unmapped with its remote QPN/comm ID\r
- * cleared so later removal attempts are no-ops.\r
- */\r
-static inline kcep_t*\r
-__insert_cep(\r
- IN kcep_t* const p_new_cep )\r
-{\r
- kcep_t *p_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_cep = __insert_by_qpn( p_new_cep );\r
- if( p_cep != p_new_cep )\r
- goto err;\r
-\r
- p_cep = __insert_by_id( p_new_cep );\r
- if( p_cep != p_new_cep )\r
- {\r
- /* Second insert failed - undo the first one. */\r
- cl_rbmap_remove_item(\r
- &gp_cep_mgr->conn_qp_map, &p_new_cep->rem_qp_item );\r
-err:\r
- /*\r
- * Clear the remote QPN and comm ID so that we don't try\r
- * to remove the CEP from those maps.\r
- */\r
- p_new_cep->remote_qpn = 0;\r
- p_new_cep->remote_comm_id = 0;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep;\r
-}\r
-\r
-\r
-/*\r
- * Remove a CEP from the connection maps it is currently in.  A zero\r
- * remote comm ID / remote QPN marks "not in that map" (see __insert_cep),\r
- * and both fields are re-zeroed here to keep removal idempotent.\r
- */\r
-static inline void\r
-__remove_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- if( p_cep->remote_comm_id )\r
- {\r
- cl_rbmap_remove_item(\r
- &gp_cep_mgr->conn_id_map, &p_cep->rem_id_item );\r
- p_cep->remote_comm_id = 0;\r
- }\r
- if( p_cep->remote_qpn )\r
- {\r
- cl_rbmap_remove_item(\r
- &gp_cep_mgr->conn_qp_map, &p_cep->rem_qp_item );\r
- p_cep->remote_qpn = 0;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Check whether 'lid' (network order) addresses this port.  With a\r
- * non-zero LMC the port owns the range [port_lid, port_lid | path_bits]\r
- * where path_bits is LMC ones; otherwise the LID must match exactly.\r
- */\r
-static boolean_t\r
-__is_lid_valid(\r
- IN ib_net16_t lid,\r
- IN ib_net16_t port_lid,\r
- IN uint8_t lmc )\r
-{\r
- uint16_t lid1;\r
- uint16_t lid2;\r
- uint16_t path_bits;\r
-\r
- if(lmc)\r
- {\r
- /* Compare in host order so the range arithmetic is meaningful. */\r
- lid1 = CL_NTOH16(lid);\r
- lid2 = CL_NTOH16(port_lid);\r
- path_bits = 0;\r
-\r
- if( lid1 < lid2 )\r
- return FALSE;\r
-\r
- /* Build a mask of 'lmc' low-order ones. */\r
- while( lmc-- )\r
- path_bits = (uint16_t)( (path_bits << 1) | 1 );\r
-\r
- lid2 |= path_bits;\r
-\r
- if( lid1 > lid2)\r
- return FALSE;\r
- }\r
- else\r
- {\r
- if (lid != port_lid)\r
- return FALSE;\r
- }\r
-\r
- return TRUE;\r
-}\r
-\r
-\r
-/*\r
- * Return TRUE if 'p_gid' appears in the port's GID table.  Caller must\r
- * hold the CA attribute lock so p_gid_table is stable.\r
- */\r
-static inline boolean_t\r
-__is_gid_valid(\r
- IN const ib_port_attr_t* const p_port_attr,\r
- IN const ib_gid_t* const p_gid )\r
-{\r
- uint16_t idx;\r
-\r
- for( idx = 0; idx < p_port_attr->num_gids; idx++ )\r
- {\r
- /* cl_memcmp returns zero on an exact byte-wise match. */\r
- if( !cl_memcmp(\r
- p_gid, &p_port_attr->p_gid_table[idx], sizeof(ib_gid_t) ) )\r
- {\r
- return TRUE;\r
- }\r
- }\r
- return FALSE;\r
-}\r
-\r
-\r
-/*\r
- * Look up 'pkey' in the port's P_Key table.  On success returns TRUE and\r
- * stores the table index in *p_pkey_index; otherwise returns FALSE and\r
- * leaves *p_pkey_index untouched.  Caller holds the CA attribute lock.\r
- */\r
-static inline boolean_t\r
-__get_pkey_index(\r
- IN const ib_port_attr_t* const p_port_attr,\r
- IN const net16_t pkey,\r
- OUT uint16_t* const p_pkey_index )\r
-{\r
- uint16_t idx;\r
-\r
- for( idx = 0; idx < p_port_attr->num_pkeys; idx++ )\r
- {\r
- if( p_port_attr->p_pkey_table[idx] == pkey )\r
- {\r
- *p_pkey_index = idx;\r
- return TRUE;\r
- }\r
- }\r
-\r
- return FALSE;\r
-}\r
-\r
-\r
-/*\r
- * Find the CEP agent for the port matching the given GID, LID and P_Key,\r
- * returning its pointer (not a port index - the original comment here was\r
- * stale) and the matching P_Key table index via *p_pkey_index.  Returns\r
- * NULL when no port matches.  Walks the CEP manager's child-object list\r
- * under the manager object lock, taking each CA's attribute lock while\r
- * inspecting its port attributes.\r
- */\r
-static cep_agent_t*\r
-__find_port_cep(\r
- IN const ib_gid_t* const p_gid,\r
- IN const net16_t lid,\r
- IN const net16_t pkey,\r
- OUT uint16_t* const p_pkey_index )\r
-{\r
- cep_agent_t *p_port_cep;\r
- cl_list_item_t *p_item;\r
- const ib_port_attr_t *p_port_attr;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- cl_spinlock_acquire( &gp_cep_mgr->obj.lock );\r
- for( p_item = cl_qlist_head( &gp_cep_mgr->obj.obj_list );\r
- p_item != cl_qlist_end( &gp_cep_mgr->obj.obj_list );\r
- p_item = cl_qlist_next( p_item ) )\r
- {\r
- p_port_cep = PARENT_STRUCT( p_item, cep_agent_t, obj.pool_item );\r
-\r
- CL_ASSERT( p_port_cep->port_num );\r
-\r
- ci_ca_lock_attr( p_port_cep->h_ca->obj.p_ci_ca );\r
-\r
- /* Index into the per-port attribute array (port_num is 1-based). */\r
- p_port_attr = p_port_cep->h_ca->obj.p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_port_attr += (p_port_cep->port_num - 1);\r
-\r
- if( __is_lid_valid( lid, p_port_attr->lid, p_port_attr->lmc ) &&\r
- __is_gid_valid( p_port_attr, p_gid ) &&\r
- __get_pkey_index( p_port_attr, pkey, p_pkey_index ) )\r
- {\r
- ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );\r
- cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
- AL_EXIT( AL_DBG_CM );\r
- return p_port_cep;\r
- }\r
-\r
- ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );\r
- }\r
- cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
- AL_EXIT( AL_DBG_CM );\r
- return NULL;\r
-}\r
-\r
-\r
-/*\r
- * PnP callback for port event notifications.  Creates a port CEP agent on\r
- * IB_PNP_PORT_ADD and destroys it on IB_PNP_PORT_REMOVE; all other PnP\r
- * events are ignored.\r
- */\r
-static ib_api_status_t\r
-__cep_pnp_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- ib_api_status_t status = IB_SUCCESS;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_PORT_ADD:\r
- /* Create the port agent. */\r
- CL_ASSERT( !p_pnp_rec->context );\r
- status = __create_port_cep( (ib_pnp_port_rec_t*)p_pnp_rec );\r
- break;\r
-\r
- case IB_PNP_PORT_REMOVE:\r
- CL_ASSERT( p_pnp_rec->context );\r
-\r
- /*\r
- * Destroy the port agent.  The context was set to the agent when\r
- * the port was added; take a reference for the destroy path.\r
- */\r
- ref_al_obj( &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj );\r
- ((cep_agent_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy(\r
- &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj, NULL );\r
- break;\r
-\r
- default:\r
- break; /* Ignore other PNP events. */\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Fold one CEP's timewait interval into the running minimum used to\r
- * re-arm the timewait timer.  Values are negative relative 100ns units,\r
- * so "minimum interval" is the value closest to zero, clamped to at\r
- * least 100ms here (halved by the caller to a 50ms floor).\r
- */\r
-static inline int64_t\r
-__min_timewait(\r
- IN int64_t current_min,\r
- IN kcep_t* const p_cep )\r
-{\r
- /*\r
- * The minimum timer interval is 50 milliseconds. This means\r
- * 500000 100ns increments. Since __process_timewait divides the\r
- * result in half (so that the worst cast timewait interval is 150%)\r
- * we compensate for this here. Note that relative time values are\r
- * expressed as negative.\r
- */\r
-#define MIN_TIMEWAIT_100NS -1000000\r
-\r
- /* Still in timewait - try again next time. */\r
- if( !current_min )\r
- {\r
- /* First sample - just clamp it. */\r
- return min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS );\r
- }\r
- else\r
- {\r
- /* max() of negatives picks the shorter (closer to zero) interval. */\r
- return max( current_min,\r
- min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ) );\r
- }\r
-}\r
-\r
-\r
-/*\r
- * Timer callback to process CEPs in timewait state. Returns time in ms.\r
- *\r
- * Walks the timewait list: CEPs whose timewait timer has signalled and\r
- * that have no outstanding references are removed from the list and the\r
- * connection maps, then either destroyed (CEP_STATE_DESTROY) or returned\r
- * to IDLE for reuse.  Returns the shortest remaining timewait, converted\r
- * to milliseconds and halved, for re-arming the timer (0 if none remain).\r
- * Called at DISPATCH_LEVEL with the CEP manager lock held.\r
- */\r
-static uint32_t\r
-__process_timewait()\r
-{\r
- cl_list_item_t *p_item;\r
- kcep_t *p_cep;\r
- LARGE_INTEGER timeout;\r
- int64_t min_timewait = 0;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- /* Zero timeout: poll the timer state without waiting. */\r
- timeout.QuadPart = 0;\r
-\r
- p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );\r
- while( p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ) )\r
- {\r
- /* Advance before potentially removing the current item. */\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );\r
- p_item = cl_qlist_next( p_item );\r
-\r
- CL_ASSERT( p_cep->state == CEP_STATE_DESTROY ||\r
- p_cep->state == CEP_STATE_TIMEWAIT );\r
-\r
- CL_ASSERT( !p_cep->p_mad );\r
-\r
- if( KeWaitForSingleObject( &p_cep->timewait_timer, Executive,\r
- KernelMode, FALSE, &timeout ) != STATUS_SUCCESS )\r
- {\r
- /* Still in timewait - try again next time. */\r
- min_timewait = __min_timewait( min_timewait, p_cep );\r
- continue;\r
- }\r
-\r
- if( p_cep->ref_cnt )\r
- {\r
- /* Send outstanding or destruction in progress. */\r
- min_timewait = __min_timewait( min_timewait, p_cep );\r
- continue;\r
- }\r
-\r
- /* Remove from the timewait list. */\r
- cl_qlist_remove_item( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );\r
-\r
- /*\r
- * Not in timewait. Remove the CEP from the maps - it should\r
- * no longer be matched against.\r
- */\r
- __remove_cep( p_cep );\r
-\r
- if( p_cep->state == CEP_STATE_DESTROY )\r
- {\r
- __destroy_cep( p_cep );\r
- }\r
- else\r
- {\r
- /* Move the CEP to the IDLE state so that it can be used again. */\r
- p_cep->state = CEP_STATE_IDLE;\r
- }\r
- }\r
-\r
- /* Negative 100ns units -> positive ms, halved (/-20000 = /-10000/2). */\r
- AL_EXIT( AL_DBG_CM );\r
- return (uint32_t)(min_timewait / -20000);\r
-}\r
-\r
-\r
-/*\r
- * Timer callback to process CEPs in timewait state.\r
- *\r
- * Runs at DISPATCH_LEVEL.  Acquires the CEP manager lock, reaps expired\r
- * timewait CEPs, and re-arms the shared timewait timer if any remain.\r
- */\r
-static void\r
-__cep_timewait_cb(\r
- IN void *context )\r
-{\r
- KLOCK_QUEUE_HANDLE hdl;\r
- uint32_t min_timewait;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( context );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
-\r
- min_timewait = __process_timewait();\r
-\r
- if( cl_qlist_count( &gp_cep_mgr->timewait_list ) )\r
- {\r
- /*\r
- * Reset the timer for half of the shortest timeout - this results\r
- * in a worst case timeout of 150% of timewait.\r
- */\r
- cl_timer_trim( &gp_cep_mgr->timewait_timer, min_timewait );\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Starts immediate cleanup of the CM. Invoked during al_obj destruction.\r
- *\r
- * Deregisters from PnP (asynchronously - the object reference is released\r
- * by the deregistration callback), then forces every timewait timer to\r
- * fire immediately and reaps them via __process_timewait.\r
- */\r
-static void\r
-__destroying_cep_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_api_status_t status;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cl_list_item_t *p_item;\r
- kcep_t *p_cep;\r
- LARGE_INTEGER timeout;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( &gp_cep_mgr->obj == p_obj );\r
- UNUSED_PARAM( p_obj );\r
-\r
- /* Deregister from PnP notifications. */\r
- if( gp_cep_mgr->h_pnp )\r
- {\r
- status = ib_dereg_pnp(\r
- gp_cep_mgr->h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- /* Dereg failed - release the reference ourselves. */\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_dereg_pnp failed with status %s.\n",\r
- ib_get_err_str(status)) );\r
- deref_al_obj( &gp_cep_mgr->obj );\r
- }\r
- }\r
-\r
- /* Cancel all timewait timers. */\r
- timeout.QuadPart = 0;\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- for( p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );\r
- p_item != cl_qlist_end( &gp_cep_mgr->timewait_list );\r
- p_item = cl_qlist_next( p_item ) )\r
- {\r
- /* Zero due-time expires the timer immediately. */\r
- p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );\r
- KeSetTimer( &p_cep->timewait_timer, timeout, NULL );\r
- }\r
- __process_timewait();\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Frees the global CEP agent. Invoked during al_obj destruction.\r
- *\r
- * Tears down everything create_cep_mgr set up: CID vector, timewait\r
- * timer, lookaside list, and the al_obj itself, then frees the manager\r
- * and clears the global pointer.\r
- */\r
-static void\r
-__free_cep_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( &gp_cep_mgr->obj == p_obj );\r
- /* All listen request should have been cleaned up by this point. */\r
- CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->listen_map ) );\r
- /* All connections should have been cancelled/disconnected by now. */\r
- CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_id_map ) );\r
- CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_qp_map ) );\r
-\r
- cl_vector_destroy( &gp_cep_mgr->cid_vector );\r
-\r
- cl_timer_destroy( &gp_cep_mgr->timewait_timer );\r
-\r
- /*\r
- * All CM port agents should have been destroyed by now via the\r
- * standard child object destruction provided by the al_obj.\r
- */\r
- ExDeleteNPagedLookasideList( &gp_cep_mgr->cep_pool );\r
- destroy_al_obj( p_obj );\r
-\r
- cl_free( gp_cep_mgr );\r
- gp_cep_mgr = NULL;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * cl_vector element initializer for the CID vector.  Free entries form a\r
- * singly linked free list: each unused slot's p_cep field stores the index\r
- * of the next free CID (hence the integer-to-pointer cast).\r
- */\r
-static cl_status_t\r
-__cid_init(\r
- IN void* const p_element,\r
- IN void* context )\r
-{\r
- cep_cid_t *p_cid;\r
-\r
- UNUSED_PARAM( context );\r
-\r
- p_cid = (cep_cid_t*)p_element;\r
-\r
- /* NULL h_al marks the slot as free. */\r
- p_cid->h_al = NULL;\r
- p_cid->p_cep = (kcep_t*)(uintn_t)++gp_cep_mgr->free_cid;\r
- p_cid->modifier = 0;\r
-\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Allocates and initialized the global CM agent.\r
- *\r
- * Builds the singleton CEP manager (gp_cep_mgr): CEP lookaside pool,\r
- * connection/listen maps, timewait machinery, CID vector, and a PnP\r
- * registration for port add/remove events.  On any failure after\r
- * init_al_obj, teardown goes through the al_obj destroy path, which ends\r
- * in __free_cep_mgr.  Returns IB_SUCCESS or the first failure status.\r
- */\r
-ib_api_status_t\r
-create_cep_mgr(\r
- IN al_obj_t* const p_parent_obj )\r
-{\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- ib_pnp_req_t pnp_req;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( gp_cep_mgr == NULL );\r
-\r
- /* Allocate the global CM agent. */\r
- gp_cep_mgr = (al_cep_mgr_t*)cl_zalloc( sizeof(al_cep_mgr_t) );\r
- if( !gp_cep_mgr )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed allocation of global CM agent.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );\r
- ExInitializeNPagedLookasideList( &gp_cep_mgr->cep_pool, NULL, NULL,\r
- 0, sizeof(kcep_t), 'PECK', 0 );\r
- cl_qmap_init( &gp_cep_mgr->port_map );\r
- cl_rbmap_init( &gp_cep_mgr->listen_map );\r
- cl_rbmap_init( &gp_cep_mgr->conn_id_map );\r
- cl_rbmap_init( &gp_cep_mgr->conn_qp_map );\r
- cl_qlist_init( &gp_cep_mgr->timewait_list );\r
- /* Timer initialization can't fail in kernel-mode. */\r
- cl_timer_init( &gp_cep_mgr->timewait_timer, __cep_timewait_cb, NULL );\r
- cl_vector_construct( &gp_cep_mgr->cid_vector );\r
-\r
- status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE,\r
- __destroying_cep_mgr, NULL, __free_cep_mgr );\r
- if( status != IB_SUCCESS )\r
- {\r
- /* init failed - destroy path isn't armed yet; free directly. */\r
- __free_cep_mgr( &gp_cep_mgr->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
- /* Attach to the parent object. */\r
- status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- cl_status = cl_vector_init( &gp_cep_mgr->cid_vector,\r
- CEP_CID_MIN, CEP_CID_GROW, sizeof(cep_cid_t), __cid_init, NULL, NULL );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_vector_init failed with status %#x.\n",\r
- cl_status) );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
- /* CID 0 is never handed out; free list starts at 1. */\r
- gp_cep_mgr->free_cid = 1;\r
-\r
- /* Register for port PnP notifications. */\r
- cl_memclr( &pnp_req, sizeof(pnp_req) );\r
- pnp_req.pnp_class = IB_PNP_PORT;\r
- pnp_req.pnp_context = &gp_cep_mgr->obj;\r
- pnp_req.pfn_pnp_cb = __cep_pnp_cb;\r
- status = ib_reg_pnp( gh_al, &pnp_req, &gp_cep_mgr->h_pnp );\r
- if( status != IB_SUCCESS )\r
- {\r
- gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_reg_pnp failed with status %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /*\r
- * Leave the reference taken in init_al_obj oustanding since PnP\r
- * deregistration is asynchronous. This replaces a call to ref and\r
- * deref the object.\r
- */\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-/******************************************************************************\r
-* CEP manager API\r
-******************************************************************************/\r
-\r
-/*\r
- * Complete a pending NDI GetConnectionReq IRP: write the new CEP's CID\r
- * into the IRP's output buffer, complete the IRP with STATUS_SUCCESS,\r
- * and release the REQ MAD.  The new CEP was stashed in the MAD's\r
- * send_context1 by the REQ-receive path.\r
- */\r
-static inline void \r
-__complete_ndi_irp(\r
- IN PIRP p_irp,\r
- IN ib_mad_element_t* p_mad )\r
-{\r
- NTSTATUS nt_status;\r
- net32_t* p_new_cid = (net32_t*)cl_ioctl_out_buf( p_irp );\r
- kcep_t* p_cep = (kcep_t* __ptr64)p_mad->send_context1;\r
-\r
- *p_new_cid = p_cep->cid;\r
- nt_status = STATUS_SUCCESS;\r
- p_irp->IoStatus.Information = sizeof(uint32_t);\r
- p_irp->IoStatus.Status = nt_status;\r
- IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT );\r
- ib_put_mad( p_mad );\r
-}\r
-\r
-/*\r
- * Store private data on the CEP identified by (h_al, cid).  The buffer\r
- * is zeroed first and psize is clamped to the CEP's pdata capacity.\r
- * Returns IB_INVALID_HANDLE if the CID does not resolve for this AL\r
- * instance.  NOTE(review): no CEP manager lock is taken here - callers\r
- * appear responsible for serialization; confirm against call sites.\r
- */\r
-static ib_api_status_t\r
-__cep_set_pdata(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN uint8_t psize,\r
- IN uint8_t* pdata )\r
-{\r
- kcep_t *p_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
- cl_memclr( p_cep->pdata, sizeof(p_cep->pdata) );\r
- p_cep->psize = min( psize, sizeof(p_cep->pdata) );\r
- memcpy( p_cep->pdata, pdata, p_cep->psize );\r
- AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CM ,\r
- ("__cep_set_pdata: set %d of pdata for cid %d, h_al %p, context %p \n", \r
- p_cep->psize, cid, h_al, p_cep->context ));\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-/* Called with the CEP and CEP manager locks held */\r
-/*\r
- * Queue a received MAD on the CEP for later consumption.\r
- *\r
- * Returns:\r
- *  IB_INVALID_STATE - CEP is being destroyed; caller keeps the MAD.\r
- *  IB_UNSUPPORTED   - NDI RDMA REQ with bad version/IP-version fields.\r
- *  IB_PENDING       - MAD consumed (handed to an NDI IRP, or queued\r
- *                     behind others / while already signalled).\r
- *  IB_SUCCESS       - MAD queued and the caller must invoke the user\r
- *                     callback (a CEP reference was taken for it).\r
- */\r
-static ib_api_status_t\r
-__cep_queue_mad(\r
- IN kcep_t* const p_cep,\r
- IN ib_mad_element_t* p_mad )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( !p_mad->p_next );\r
-\r
- if( p_cep->state == CEP_STATE_DESTROY )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_STATE;\r
- }\r
-\r
- /* NDI connection request case */\r
- /* The RDMA-CM SID prefix identifies an NDI-style listen. */\r
- if ( p_cep->state == CEP_STATE_LISTEN &&\r
- (p_cep->sid & ~0x0ffffffI64) == IB_REQ_CM_RDMA_SID_PREFIX )\r
- { /* Try to complete pending IRP, if any */\r
- PIRP p_irp;\r
- PLIST_ENTRY p_list_entry;\r
- mad_cm_req_t* p_req = (mad_cm_req_t*)ib_get_mad_buf( p_mad );\r
- ib_cm_rdma_req_t *p_rdma_req = (ib_cm_rdma_req_t *)p_req->pdata;\r
-\r
- /* reject connection request with incorrect version parameters */\r
- /* ipv is 0x40 for IPv4, 0x60 for IPv6. */\r
- if ( ((p_rdma_req->maj_min_ver >> 4) != IB_REQ_CM_RDMA_MAJOR_VERSION) ||\r
- ((p_rdma_req->maj_min_ver & 0x0f) > IB_REQ_CM_RDMA_MINOR_VERSION) ||\r
- (p_rdma_req->ipv != 0x40 && p_rdma_req->ipv != 0x60) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, \r
- ("NDI connection req is rejected: maj_min_ver %d, ipv %#x \n", \r
- p_rdma_req->maj_min_ver, p_rdma_req->ipv ) );\r
- return IB_UNSUPPORTED;\r
- }\r
- \r
- /* get a pending IRP */\r
- if ( !IsListEmpty( &p_cep->irp_que ) )\r
- {\r
- kcep_t* p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;\r
- \r
- // get IRP\r
- p_list_entry = RemoveHeadList( &p_cep->irp_que );\r
- p_irp = (PIRP)CONTAINING_RECORD( p_list_entry, IRP, Tail.Overlay.ListEntry );\r
- \r
- // store REQ private data on the newly created CEP\r
- __cep_set_pdata( \r
- (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1], \r
- p_new_cep->cid, sizeof(p_req->pdata), (uint8_t*)p_req->pdata );\r
- AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,\r
- ("set %d of REQ pdata to CEP with cid %d, h_al %p\n", \r
- sizeof(p_req->pdata), p_new_cep->cid, \r
- (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1] ));\r
-\r
- // complete GetConnectionReq IRP\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
-\r
- __complete_ndi_irp( p_irp, p_mad );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_PENDING;\r
- }\r
- }\r
-\r
- /* Queue this MAD for processing. */\r
- if( p_cep->p_mad_head )\r
- {\r
- CL_ASSERT( p_cep->signalled );\r
- /*\r
- * If there's already a MAD at the head of the list, we will not\r
- * invoke the callback. Just queue and exit.\r
- */\r
- CL_ASSERT( p_cep->p_mad_tail );\r
- p_cep->p_mad_tail->p_next = p_mad;\r
- p_cep->p_mad_tail = p_mad;\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_PENDING;\r
- }\r
-\r
- p_cep->p_mad_head = p_mad;\r
- p_cep->p_mad_tail = p_mad;\r
-\r
- if( p_cep->signalled )\r
- {\r
- /* signalled was already non-zero. Don't invoke the callback again. */\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_PENDING;\r
- }\r
-\r
- p_cep->signalled = TRUE;\r
-\r
- /* Take a reference since we're about to invoke the callback. */\r
- cl_atomic_inc( &p_cep->ref_cnt );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Atomically detach and complete the CEP's pending IRP (if any) with the\r
- * given status and priority boost.  The interlocked exchange guarantees\r
- * exactly one completer even if a cancel routine races us; the cancel\r
- * routine is cleared before completion.\r
- */\r
-static inline void\r
-__cep_complete_irp(\r
- IN kcep_t* const p_cep,\r
- IN NTSTATUS status,\r
- IN CCHAR increment )\r
-{\r
- IRP *p_irp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_irp = InterlockedExchangePointer( &p_cep->p_irp, NULL );\r
-\r
- if( p_irp )\r
- {\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
-\r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = status;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, increment );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Notify the CEP owner that work is pending: invoke the user callback if\r
- * one is registered, otherwise complete the pending IRP.  Then drop the\r
- * callback reference taken by __cep_queue_mad; if that was the last\r
- * reference, invoke the destroy callback.  Runs at DISPATCH_LEVEL.\r
- */\r
-static inline void\r
-__process_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- ib_pfn_destroy_cb_t pfn_destroy_cb;\r
- void *context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- /* Signal to the user there are callback waiting. */\r
- if( p_cep->pfn_cb )\r
- p_cep->pfn_cb( p_cep->p_cid->h_al, p_cep->cid );\r
- else\r
- __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );\r
-\r
- /* Snapshot before the deref - the CEP may be freed underneath us. */\r
- pfn_destroy_cb = p_cep->pfn_destroy_cb;\r
- context = p_cep->context;\r
-\r
- /*\r
- * Release the reference for the callback and invoke the destroy\r
- * callback if necessary.\r
- */\r
- if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )\r
- pfn_destroy_cb( context );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-/*\r
- * Convert an IB packet-lifetime exponent into a MAD retry timeout in\r
- * milliseconds (floor 1ms, saturating at ~0UL for huge exponents).\r
- */\r
-static uint32_t\r
-__calc_mad_timeout(\r
- IN const uint8_t pkt_life )\r
-{\r
- /*\r
- * Calculate the retry timeout.\r
- * All timeout values in micro seconds are expressed as 4.096 * 2^x,\r
- * where x is the timeout. The formula to approximates this to\r
- * milliseconds using just shifts and subtraction is:\r
- * timeout_ms = 67 << (x - 14)\r
- * The results are off by 0.162%.\r
- *\r
- * Note that we will never return less than 1 millisecond. We also\r
- * trap exceedingly large values to prevent wrapping.\r
- */\r
- if( pkt_life > 39 )\r
- return ~0UL;\r
- if( pkt_life > 14 )\r
- return 67 << (pkt_life - 14);\r
- else if( pkt_life > 8 )\r
- return 67 >> (14 - pkt_life);\r
- else\r
- return 1;\r
-}\r
-\r
-\r
-/* CEP manager lock is held when calling this function. */\r
-/*\r
- * Allocate and initialize a new CEP from the lookaside pool: assign it a\r
- * CID from the free list, derive the local comm ID from CID + a per-slot\r
- * modifier (so recycled CIDs yield distinct comm IDs), and reference the\r
- * CEP manager.  Returns NULL on allocation or CID exhaustion.\r
- */\r
-static kcep_t*\r
-__create_cep()\r
-{\r
- kcep_t *p_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_cep = ExAllocateFromNPagedLookasideList( &gp_cep_mgr->cep_pool );\r
- if( !p_cep )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate CEP.\n") );\r
- return NULL;\r
- }\r
-\r
- cl_memclr( p_cep, sizeof(kcep_t) );\r
-\r
- KeInitializeTimer( &p_cep->timewait_timer );\r
-\r
- p_cep->state = CEP_STATE_IDLE;\r
-\r
- /*\r
- * Pre-charge the reference count to 1. The code will invoke the\r
- * destroy callback once the ref count reaches to zero.\r
- */\r
- p_cep->ref_cnt = 1;\r
- p_cep->signalled = FALSE;\r
-\r
- /* Find a free entry in the CID vector. */\r
- p_cep->p_cid = __get_lcid( &p_cep->cid );\r
-\r
- if( !p_cep->p_cid )\r
- {\r
- ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to get CID.\n") );\r
- return NULL;\r
- }\r
-\r
- p_cep->p_cid->modifier++;\r
- /*\r
- * We don't ever want a modifier of zero for the CID at index zero\r
- * since it would result in a total CID of zero.\r
- */\r
- if( !p_cep->cid && !p_cep->p_cid->modifier )\r
- p_cep->p_cid->modifier++;\r
-\r
- /* comm ID = CID in the low 24 bits, modifier in the high 8. */\r
- p_cep->local_comm_id = p_cep->cid | (p_cep->p_cid->modifier << 24);\r
- p_cep->tid = p_cep->local_comm_id;\r
-\r
- p_cep->p_cid->p_cep = p_cep;\r
-\r
- InitializeListHead( &p_cep->irp_que );\r
-\r
- /* Keep the CEP manager alive while this CEP exists. */\r
- ref_al_obj( &gp_cep_mgr->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return p_cep;\r
-}\r
-\r
-\r
-/*\r
- * Associate a CEP with an owning AL instance, user callback, and context,\r
- * and link it into the AL instance's CEP list for cleanup on AL destroy.\r
- */\r
-static inline void\r
-__bind_cep(\r
- IN kcep_t* const p_cep,\r
- IN ib_al_handle_t h_al,\r
- IN al_pfn_cep_cb_t pfn_cb,\r
- IN void* __ptr64 context )\r
-{\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_cep->p_cid );\r
- CL_ASSERT( h_al );\r
-\r
- p_cep->p_cid->h_al = h_al;\r
- p_cep->pfn_cb = pfn_cb;\r
- p_cep->context = context;\r
-\r
- /* Track the CEP in its owning AL instance. */\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
- cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );\r
- cl_spinlock_release( &h_al->obj.lock );\r
-}\r
-\r
-\r
-/*\r
- * Detach a CEP from its owning AL instance: unlink it from the AL's CEP\r
- * list and repoint the CID slot at the global AL handle so the slot stays\r
- * "in use" but no user AL instance can resolve it any more.\r
- */\r
-static inline void\r
-__unbind_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_cep->p_cid );\r
- CL_ASSERT( p_cep->p_cid->h_al );\r
-\r
- /* Remove the CEP from its owning AL instance's tracking list. */\r
- cl_spinlock_acquire( &p_cep->p_cid->h_al->obj.lock );\r
- cl_qlist_remove_item( &p_cep->p_cid->h_al->cep_list, &p_cep->al_item );\r
- cl_spinlock_release( &p_cep->p_cid->h_al->obj.lock );\r
-\r
- /*\r
- * Set to the internal AL handle - it needs to be non-NULL to indicate it's\r
- * a valid entry, and it can't be a user's AL instance to prevent using a\r
- * destroyed CEP.\r
- */\r
- p_cep->p_cid->h_al = gh_al;\r
-#ifdef _DEBUG_\r
- p_cep->pfn_cb = NULL;\r
-#endif /* _DEBUG_ */\r
-}\r
-\r
-\r
-/*\r
- * Compute the CEP's timewait interval (negative relative 100ns units) as\r
- * the sum of the 2x-packet-lifetime and target-ACK-delay contributions,\r
- * each converted from its IB exponent via shifts.\r
- */\r
-static inline void\r
-__calc_timewait(\r
- IN kcep_t* const p_cep )\r
-{\r
-\r
- /*\r
- * Use the CEP's stored packet lifetime to calculate the time at which\r
- * the CEP exits timewait. Packet lifetime is expressed as\r
- * 4.096 * 2^pkt_life microseconds, and we need a timeout in 100ns\r
- * increments. The formual using just shifts and subtraction is this:\r
- * timeout = (41943 << (pkt_life - 10));\r
- * The results are off by .0001%, which should be more than adequate.\r
- */\r
- if( p_cep->max_2pkt_life > 10 )\r
- {\r
- p_cep->timewait_time.QuadPart =\r
- -(41943i64 << (p_cep->max_2pkt_life - 10));\r
- }\r
- else\r
- {\r
- p_cep->timewait_time.QuadPart =\r
- -(41943i64 >> (10 - p_cep->max_2pkt_life));\r
- }\r
- /* Add (subtract, since negative) the remote ACK delay contribution. */\r
- if( p_cep->target_ack_delay > 10 )\r
- {\r
- p_cep->timewait_time.QuadPart -=\r
- (41943i64 << (p_cep->target_ack_delay - 10));\r
- }\r
- else\r
- {\r
- p_cep->timewait_time.QuadPart -=\r
- (41943i64 >> (10 - p_cep->target_ack_delay));\r
- }\r
-}\r
-\r
-\r
-/* Called with CEP manager and CEP locks held. */\r
-/*\r
- * Put the CEP on the timewait list, arm its per-CEP timer with the\r
- * precomputed (negative/relative) timewait interval, and trim the shared\r
- * reaper timer so it fires within half of this CEP's interval.\r
- */\r
-static inline void\r
-__insert_timewait(\r
- IN kcep_t* const p_cep )\r
-{\r
- cl_qlist_insert_tail( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );\r
-\r
- KeSetTimer( &p_cep->timewait_timer, p_cep->timewait_time, NULL );\r
-\r
- /*\r
- * Reset the timer for half of the shortest timeout - this results\r
- * in a worst case timeout of 150% of timewait.\r
- */\r
- cl_timer_trim( &gp_cep_mgr->timewait_timer,\r
- (uint32_t)(-p_cep->timewait_time.QuadPart / 20000) );\r
-}\r
-\r
-\r
-/*\r
- * Send a CM REJ for this CEP with the given reject status, optional ARI\r
- * (additional reject info) and optional private data.  Acquires a MAD\r
- * from the port agent's pool; BUGFIX: the MAD is now returned to the\r
- * pool on the ARI/pdata formatting error paths, which previously leaked\r
- * it.  Returns IB_SUCCESS once the REJ is handed to __reject_mad.\r
- */\r
-static inline ib_api_status_t\r
-__do_cep_rej(\r
- IN kcep_t* const p_cep,\r
- IN ib_rej_status_t rej_status,\r
- IN const uint8_t* const p_ari,\r
- IN uint8_t ari_len,\r
- IN const uint8_t* const p_pdata,\r
- IN uint8_t pdata_len )\r
-{\r
- ib_api_status_t status;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
-\r
- p_port_cep = __get_cep_agent( p_cep );\r
- if( !p_port_cep )\r
- return IB_INSUFFICIENT_RESOURCES;\r
-\r
- status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );\r
- if( status != IB_SUCCESS )\r
- return status;\r
-\r
- __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );\r
-\r
- status = conn_rej_set_ari(\r
- p_ari, ari_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- /* BUGFIX: release the MAD instead of leaking it. */\r
- ib_put_mad( p_mad );\r
- return status;\r
- }\r
-\r
- status = conn_rej_set_pdata(\r
- p_pdata, pdata_len, (mad_cm_rej_t*)p_mad->p_mad_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- /* BUGFIX: release the MAD instead of leaking it. */\r
- ib_put_mad( p_mad );\r
- return status;\r
- }\r
-\r
- __reject_mad( p_port_cep, p_cep, p_mad, rej_status );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Acquire a MAD for sending on this CEP's port agent and pre-format its\r
- * address vector (primary path) and common CM MAD header for the given\r
- * attribute ID.  On success returns the agent via *pp_port_cep and the\r
- * MAD via *pp_mad.\r
- */\r
-static ib_api_status_t\r
-__cep_get_mad(\r
- IN kcep_t* const p_cep,\r
- IN net16_t attr_id,\r
- OUT cep_agent_t** const pp_port_cep,\r
- OUT ib_mad_element_t** const pp_mad )\r
-{\r
- cep_agent_t *p_port_cep;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_port_cep = __get_cep_agent( p_cep );\r
- if( !p_port_cep )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("__get_cep_agent failed.\n") );\r
- return IB_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_mad returned %s.\n", ib_get_err_str( status )) );\r
- return status;\r
- }\r
-\r
- __format_mad_av( *pp_mad, &p_cep->av[p_cep->idx_primary] );\r
-\r
- __format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id );\r
-\r
- *pp_port_cep = p_port_cep;\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Fill in a DREQ (disconnect request) MAD body from the CEP's connection\r
- * identifiers, plus optional private data.  Returns the status of the\r
- * private-data copy.\r
- */\r
-static ib_api_status_t\r
-__format_dreq(\r
- IN kcep_t* const p_cep, \r
- IN const uint8_t* p_pdata OPTIONAL,\r
- IN uint8_t pdata_len,\r
- IN OUT ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_dreq_t *p_dreq;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;\r
-\r
- p_dreq->local_comm_id = p_cep->local_comm_id;\r
- p_dreq->remote_comm_id = p_cep->remote_comm_id;\r
-\r
- conn_dreq_set_remote_qpn( p_cep->remote_qpn, p_dreq );\r
-\r
- /* copy optional data */\r
- status = conn_dreq_set_pdata( p_pdata, pdata_len, p_dreq );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Send a DREQ (with no private data) for this CEP using the retrying\r
- * send path.  NOTE(review): if __format_dreq fails, the MAD acquired by\r
- * __cep_get_mad is not returned to the pool - potential leak on that\r
- * (unlikely) error path.\r
- */\r
-static ib_api_status_t\r
-__dreq_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- ib_api_status_t status;\r
- cep_agent_t *p_agt;\r
- ib_mad_element_t *p_mad;\r
-\r
- status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_agt, &p_mad );\r
- if( status != IB_SUCCESS )\r
- return status;\r
-\r
- status = __format_dreq( p_cep, NULL, 0, p_mad );\r
- if( status != IB_SUCCESS )\r
- return status;\r
-\r
- return __cep_send_retry( p_agt, p_cep, p_mad );\r
-}\r
-\r
-\r
-/*\r
- * Fill in a DREP (disconnect reply) MAD body from the CEP's connection\r
- * identifiers, plus optional private data.  Returns the status of the\r
- * private-data copy.\r
- */\r
-static ib_api_status_t\r
-__format_drep(\r
- IN kcep_t* const p_cep,\r
- IN const uint8_t* p_pdata OPTIONAL,\r
- IN uint8_t pdata_len,\r
- IN OUT mad_cm_drep_t* const p_drep )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_drep->local_comm_id = p_cep->local_comm_id;\r
- p_drep->remote_comm_id = p_cep->remote_comm_id;\r
-\r
- /* copy optional data */\r
- status = conn_drep_set_pdata( p_pdata, pdata_len, p_drep );\r
-\r
- /* Store the DREP MAD so we can repeat it if we get a repeated DREQ.\r
- * (Original comment said "RTU" - it is the DREP that is cached here.)\r
- * Skip the copy when formatting directly into the cached slot. */\r
- if( status == IB_SUCCESS && p_drep != &p_cep->mads.drep )\r
- p_cep->mads.drep = *p_drep;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Best-effort send of a DREP (no private data) for this CEP.  Failures\r
- * are silently dropped.  NOTE(review): on the __format_drep failure path\r
- * the MAD acquired by __cep_get_mad is not returned to the pool, and\r
- * both early returns skip the AL_EXIT trace - worth confirming upstream.\r
- */\r
-static void\r
-__drep_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- cep_agent_t *p_agt;\r
- ib_mad_element_t *p_mad;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- if( __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_agt, &p_mad ) != IB_SUCCESS )\r
- return;\r
-\r
- if( __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf )\r
- != IB_SUCCESS )\r
- {\r
- return;\r
- }\r
-\r
- __cep_send_mad( p_agt, p_mad );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-/* Called with CEP manager lock held. */\r
-/*\r
- * Drive a CEP out of whatever state it is in toward destruction:\r
- * flush pending MADs (recursively cleaning up any CEPs they spawned),\r
- * then reject / cancel / disconnect / DREP as the state requires, and\r
- * finally park the CEP in CEP_STATE_DESTROY (via timewait where needed).\r
- * Returns the decremented reference count, or -1 if cleanup already ran.\r
- * Runs at DISPATCH_LEVEL.\r
- */\r
-static int32_t\r
-__cleanup_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- ib_mad_element_t *p_mad;\r
- kcep_t *p_new_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
-\r
- /* If we've already come through here, we're done. */\r
- if( p_cep->state == CEP_STATE_DESTROY ||\r
- p_cep->state == CEP_STATE_DREQ_DESTROY )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return -1;\r
- }\r
-\r
- /* Cleanup the pending MAD list. */\r
- while( p_cep->p_mad_head )\r
- {\r
- p_mad = p_cep->p_mad_head;\r
- p_cep->p_mad_head = p_mad->p_next;\r
- p_mad->p_next = NULL;\r
- if( p_mad->send_context1 )\r
- {\r
- /* A REQ MAD carries the CEP it created - tear that down too. */\r
- p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;\r
-\r
- __unbind_cep( p_new_cep );\r
- __cleanup_cep( p_new_cep );\r
- }\r
- ib_put_mad( p_mad );\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- /* Release the REP MAD staged but never sent. */\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Fall through. */\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_REP_MRA_SENT:\r
- /* Reject the connection. */\r
- __do_cep_rej( p_cep, IB_REJ_USER_DEFINED, NULL, 0, NULL, 0 );\r
- break;\r
-\r
- case CEP_STATE_REQ_SENT:\r
- case CEP_STATE_REQ_MRA_RCVD:\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- /* Cancel the send. */\r
- CL_ASSERT( p_cep->h_mad_svc );\r
- CL_ASSERT( p_cep->p_send_mad );\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- /* Reject the connection. */\r
- __do_cep_rej( p_cep, IB_REJ_TIMEOUT, (uint8_t*)&p_cep->local_ca_guid,\r
- sizeof(p_cep->local_ca_guid), NULL, 0 );\r
- break;\r
-\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- case CEP_STATE_PRE_APR:\r
- case CEP_STATE_PRE_APR_MRA_SENT:\r
- /* Disconnect the connection. */\r
- if( __dreq_cep( p_cep ) != IB_SUCCESS )\r
- break;\r
- /* Fall through. */\r
-\r
- case CEP_STATE_DREQ_SENT:\r
- /* DREQ in flight - destruction completes when the DREP/timeout\r
- * arrives; no timewait insertion here. */\r
- p_cep->state = CEP_STATE_DREQ_DESTROY;\r
- AL_EXIT( AL_DBG_CM );\r
- return cl_atomic_dec( &p_cep->ref_cnt );\r
-\r
- case CEP_STATE_DREQ_RCVD:\r
- /* Send the DREP. */\r
- __drep_cep( p_cep );\r
- break;\r
-\r
- case CEP_STATE_SREQ_RCVD:\r
- /* TODO: Reject the SIDR request. */\r
- break;\r
-\r
- case CEP_STATE_LISTEN:\r
- /* Remove from listen map. */\r
- cl_rbmap_remove_item( &gp_cep_mgr->listen_map, &p_cep->listen_item );\r
-\r
- if( p_cep->p_cmp_buf )\r
- {\r
- cl_free( p_cep->p_cmp_buf );\r
- p_cep->p_cmp_buf = NULL;\r
- }\r
- break;\r
-\r
- case CEP_STATE_PRE_REQ:\r
- /* Release the REQ MAD staged but never sent. */\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Fall through. */\r
- case CEP_STATE_IDLE:\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("CEP in state %d.\n", p_cep->state) );\r
- /* Deliberate fall-through: unknown states are treated as timewait. */\r
- case CEP_STATE_TIMEWAIT:\r
- /* Already in timewait - so all is good. */\r
- p_cep->state = CEP_STATE_DESTROY;\r
- AL_EXIT( AL_DBG_CM );\r
- return cl_atomic_dec( &p_cep->ref_cnt );\r
- }\r
-\r
- /* Remaining states go through timewait before final destruction. */\r
- p_cep->state = CEP_STATE_DESTROY;\r
- __insert_timewait( p_cep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return cl_atomic_dec( &p_cep->ref_cnt );\r
-}\r
-\r
-\r
-/*\r
- * Final CEP teardown: push its CID slot back on the free list, cancel\r
- * the timewait timer, return the CEP to the lookaside pool, and drop\r
- * the CEP manager reference taken in __create_cep.\r
- */\r
-static void\r
-__destroy_cep(\r
- IN kcep_t* const p_cep )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT(\r
- p_cep->cid < cl_vector_get_size( &gp_cep_mgr->cid_vector ) );\r
-\r
- CL_ASSERT( p_cep->p_cid == (cep_cid_t*)cl_vector_get_ptr(\r
- &gp_cep_mgr->cid_vector, p_cep->cid ) );\r
-\r
- /* Free the CID. */\r
- /* Slot stores the old free-list head; this CID becomes the new head. */\r
- p_cep->p_cid->p_cep = (kcep_t*)(uintn_t)gp_cep_mgr->free_cid;\r
- p_cep->p_cid->h_al = NULL;\r
- gp_cep_mgr->free_cid = p_cep->cid;\r
-\r
- KeCancelTimer( &p_cep->timewait_timer );\r
-\r
- ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );\r
-\r
- deref_al_obj( &gp_cep_mgr->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_create_cep(\r
- IN ib_al_handle_t h_al,\r
- IN al_pfn_cep_cb_t pfn_cb,\r
- IN void* __ptr64 context,\r
- OUT net32_t* const p_cid )\r
-{\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cid );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __create_cep();\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate CEP.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- __bind_cep( p_cep, h_al, pfn_cb, context );\r
-\r
- *p_cid = p_cep->cid;\r
-\r
- AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CM ,\r
- ("Created CEP with cid %d, h_al %p, context %p \n", \r
- p_cep->cid, h_al, p_cep->context ));\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_destroy_cep(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN ib_pfn_destroy_cb_t pfn_destroy_cb )\r
-{\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- void *context;\r
- int32_t ref_cnt;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- /*\r
- * Remove the CEP from the CID vector - no further API calls\r
- * will succeed for it.\r
- */\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- /* Invalid handle. */\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- context = p_cep->context;\r
- p_cep->pfn_destroy_cb = pfn_destroy_cb;\r
-\r
- /* Cancel any queued IRP */\r
- __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
-\r
- __unbind_cep( p_cep );\r
- ref_cnt = __cleanup_cep( p_cep );\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- /*\r
- * Done waiting. Release the reference so the timewait timer callback\r
- * can finish cleaning up.\r
- */\r
- if( !ref_cnt && pfn_destroy_cb )\r
- pfn_destroy_cb( context );\r
-\r
- AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CM ,\r
- ("Destroeyd CEP with cid %d, h_al %p, context %p \n", \r
- cid, h_al, context ));\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_listen(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN ib_cep_listen_t* const p_listen_info )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep, *p_listen;\r
- cl_rbmap_item_t *p_item, *p_insert_at;\r
- boolean_t left = TRUE;\r
- intn_t cmp;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_listen_info );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REQ:\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Must change state here in case listen fails */\r
- p_cep->state = CEP_STATE_IDLE;\r
- /* Fall through. */\r
- case CEP_STATE_IDLE:\r
- break;\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- goto done;\r
- }\r
-\r
- /* Insert the CEP into the listen map. */\r
- p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );\r
- p_insert_at = p_item;\r
- while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )\r
- {\r
- p_insert_at = p_item;\r
-\r
- p_listen = PARENT_STRUCT( p_item, kcep_t, listen_item );\r
-\r
- if( p_listen_info->svc_id == p_listen->sid )\r
- goto port_cmp;\r
- \r
- if( p_listen_info->svc_id < p_listen->sid )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
-\r
- continue;\r
-\r
-port_cmp:\r
- if( p_listen_info->port_guid != IB_ALL_PORTS )\r
- {\r
- if( p_listen_info->port_guid == p_listen->port_guid )\r
- goto pdata_cmp;\r
- \r
- if( p_listen_info->port_guid < p_listen->port_guid )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
-\r
- continue;\r
- }\r
-\r
-pdata_cmp:\r
- /*\r
- * If an existing listen doesn't have a compare buffer,\r
- * then we found a duplicate.\r
- */\r
- if( !p_listen->p_cmp_buf || !p_listen_info->p_cmp_buf )\r
- break;\r
-\r
- if( p_listen_info->p_cmp_buf )\r
- {\r
- /* Compare length must match. */\r
- if( p_listen_info->cmp_len != p_listen->cmp_len )\r
- break;\r
-\r
- /* Compare offset must match. */\r
- if( p_listen_info->cmp_offset != p_listen->cmp_offset )\r
- break;\r
-\r
- cmp = cl_memcmp( &p_listen_info->p_cmp_buf,\r
- p_listen->p_cmp_buf, p_listen->cmp_len );\r
-\r
- if( cmp < 0 )\r
- p_item = cl_rbmap_left( p_item ), left = TRUE;\r
- else if( cmp > 0 )\r
- p_item = cl_rbmap_right( p_item ), left = FALSE;\r
- else\r
- break;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,\r
- ("Svc ID match but compare buffer mismatch.\n") );\r
- continue;\r
- }\r
- }\r
-\r
- if( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )\r
- {\r
- /* Duplicate!!! */\r
- status = IB_INVALID_SETTING;\r
- goto done;\r
- }\r
-\r
- /* Set up the CEP. */\r
- if( p_listen_info->p_cmp_buf )\r
- {\r
- p_cep->p_cmp_buf = cl_malloc( p_listen_info->cmp_len );\r
- if( !p_cep->p_cmp_buf )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to allocate compare buffer.\n") );\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto done;\r
- }\r
-\r
- cl_memcpy( p_cep->p_cmp_buf,\r
- p_listen_info->p_cmp_buf, p_listen_info->cmp_len );\r
- }\r
- p_cep->cmp_len = p_listen_info->cmp_len;\r
- p_cep->cmp_offset = p_listen_info->cmp_offset;\r
- p_cep->sid = p_listen_info->svc_id;\r
- p_cep->port_guid = p_listen_info->port_guid;\r
- p_cep->state = CEP_STATE_LISTEN;\r
-\r
- cl_rbmap_insert( &gp_cep_mgr->listen_map, p_insert_at,\r
- &p_cep->listen_item, left );\r
-\r
- status = IB_SUCCESS;\r
-\r
-done:\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static cep_agent_t*\r
-__format_path_av(\r
- IN const ib_path_rec_t* const p_path,\r
- OUT kcep_av_t* const p_av )\r
-{\r
- cep_agent_t* p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_path );\r
- CL_ASSERT( p_av );\r
-\r
- cl_memclr( p_av, sizeof(kcep_av_t) );\r
-\r
- p_port_cep = __find_port_cep( &p_path->sgid, p_path->slid,\r
- p_path->pkey, &p_av->pkey_index );\r
- if( !p_port_cep )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return NULL;\r
- }\r
-\r
- p_av->port_guid = p_port_cep->port_guid;\r
-\r
- p_av->attr.port_num = p_port_cep->port_num;\r
-\r
- p_av->attr.sl = ib_path_rec_sl( p_path );\r
- p_av->attr.dlid = p_path->dlid;\r
-\r
- p_av->attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
- 1, p_path->tclass, ib_path_rec_flow_lbl( p_path ) );\r
- p_av->attr.grh.hop_limit = ib_path_rec_hop_limit( p_path );\r
- p_av->attr.grh.src_gid = p_path->sgid;\r
- p_av->attr.grh.dest_gid = p_path->dgid;\r
-\r
- p_av->attr.grh_valid = !ib_gid_is_link_local( &p_path->dgid );\r
-\r
- p_av->attr.static_rate = ib_path_rec_rate( p_path );\r
- p_av->attr.path_bits = (uint8_t)(p_path->slid - p_port_cep->base_lid);\r
-\r
- /*\r
- * Note that while we never use the connected AV attributes internally,\r
- * we store them so we can pass them back to users.\r
- */\r
- p_av->attr.conn.path_mtu = ib_path_rec_mtu( p_path );\r
- p_av->attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
- ib_path_rec_pkt_life( p_path ) + 1, 0 );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return p_port_cep;\r
-}\r
-\r
-\r
-/*\r
- * Formats a REQ mad's path information given a path record.\r
- */\r
-static void\r
-__format_req_path(\r
- IN const ib_path_rec_t* const p_path,\r
- IN const uint8_t ack_delay,\r
- OUT req_path_info_t* const p_req_path )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_req_path->local_lid = p_path->slid;\r
- p_req_path->remote_lid = p_path->dlid;\r
- p_req_path->local_gid = p_path->sgid;\r
- p_req_path->remote_gid = p_path->dgid;\r
-\r
- conn_req_path_set_flow_lbl( ib_path_rec_flow_lbl( p_path ),\r
- p_req_path );\r
- conn_req_path_set_pkt_rate( ib_path_rec_rate( p_path ),\r
- p_req_path );\r
-\r
- /* Traffic class & hop limit */\r
- p_req_path->traffic_class = p_path->tclass;\r
- p_req_path->hop_limit = ib_path_rec_hop_limit( p_path );\r
-\r
- /* SL & Subnet Local fields */\r
- conn_req_path_set_svc_lvl( ib_path_rec_sl( p_path ),\r
- p_req_path );\r
- conn_req_path_set_subn_lcl(\r
- ib_gid_is_link_local( &p_path->dgid ), p_req_path );\r
-\r
- conn_req_path_set_lcl_ack_timeout(\r
- calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_path ) + 1,\r
- ack_delay ), p_req_path );\r
-\r
- conn_req_path_clr_rsvd_fields( p_req_path );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__format_req(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_req_t* const p_cm_req )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_req_t* p_req;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_cm_req );\r
- CL_ASSERT( p_cep->p_mad );\r
-\r
- /* Format the MAD header. */\r
- __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REQ_ATTR_ID );\r
-\r
- /* Set the addressing information in the MAD. */\r
- __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );\r
-\r
- p_req = (mad_cm_req_t*)p_cep->p_mad->p_mad_buf;\r
-\r
- ci_ca_lock_attr( p_cm_req->h_qp->obj.p_ci_ca );\r
- /*\r
- * Store the local CA's ack timeout for use when computing\r
- * the local ACK timeout.\r
- */\r
- p_cep->local_ack_delay =\r
- p_cm_req->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay;\r
- ci_ca_unlock_attr( p_cm_req->h_qp->obj.p_ci_ca );\r
-\r
- /* Format the primary path. */\r
- __format_req_path( p_cm_req->p_primary_path,\r
- p_cep->local_ack_delay, &p_req->primary_path );\r
-\r
- if( p_cm_req->p_alt_path )\r
- {\r
- /* Format the alternate path. */\r
- __format_req_path( p_cm_req->p_alt_path,\r
- p_cep->local_ack_delay, &p_req->alternate_path );\r
- }\r
- else\r
- {\r
- cl_memclr( &p_req->alternate_path, sizeof(req_path_info_t) );\r
- }\r
-\r
- /* Set the local communication in the REQ. */\r
- p_req->local_comm_id = p_cep->local_comm_id;\r
- p_req->sid = p_cm_req->svc_id;\r
- p_req->local_ca_guid = p_cm_req->h_qp->obj.p_ci_ca->verbs.guid;\r
-\r
- conn_req_set_lcl_qpn( p_cep->local_qpn, p_req );\r
- conn_req_set_resp_res( p_cm_req->resp_res, p_req );\r
- conn_req_set_init_depth( p_cm_req->init_depth, p_req );\r
- conn_req_set_remote_resp_timeout( p_cm_req->remote_resp_timeout, p_req );\r
- conn_req_set_qp_type( p_cm_req->h_qp->type, p_req );\r
- conn_req_set_flow_ctrl( p_cm_req->flow_ctrl, p_req );\r
- conn_req_set_starting_psn( p_cep->rq_psn, p_req );\r
-\r
- conn_req_set_lcl_resp_timeout( p_cm_req->local_resp_timeout, p_req );\r
- conn_req_set_retry_cnt( p_cm_req->retry_cnt, p_req );\r
-\r
- p_req->pkey = p_cm_req->p_primary_path->pkey;\r
-\r
- conn_req_set_mtu( ib_path_rec_mtu( p_cm_req->p_primary_path ), p_req );\r
- conn_req_set_rnr_retry_cnt( p_cm_req->rnr_retry_cnt, p_req );\r
-\r
- conn_req_set_max_cm_retries( p_cm_req->max_cm_retries, p_req );\r
- status = conn_req_set_pdata(\r
- p_cm_req->p_req_pdata, p_cm_req->req_length, p_req );\r
-\r
- conn_req_clr_rsvd_fields( p_req );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__save_user_req(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_req_t* const p_cm_req,\r
- OUT cep_agent_t** const pp_port_cep )\r
-{\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- if( !p_cm_req->p_primary_path )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid primary path record.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- p_cep->sid = p_cm_req->svc_id;\r
-\r
- p_cep->idx_primary = 0;\r
-\r
- p_cep->p2p = (p_cm_req->pfn_cm_req_cb != NULL);\r
-\r
- if( p_cm_req->p_compare_buffer )\r
- {\r
- if( !p_cm_req->compare_length ||\r
- (p_cm_req->compare_offset + p_cm_req->compare_length) >\r
- IB_REQ_PDATA_SIZE )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_SETTING;\r
- }\r
- p_cep->p_cmp_buf = cl_malloc( p_cm_req->compare_length );\r
- if( !p_cep->p_cmp_buf )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- cl_memcpy( p_cep->p_cmp_buf,\r
- p_cm_req->p_compare_buffer, p_cm_req->compare_length );\r
-\r
- p_cep->cmp_len = p_cm_req->compare_length;\r
- p_cep->cmp_offset = p_cm_req->compare_offset;\r
- }\r
- else\r
- {\r
- p_cep->p_cmp_buf = NULL;\r
- p_cep->cmp_len = 0;\r
- p_cep->cmp_offset = 0;\r
- }\r
- p_cep->was_active = TRUE;\r
-\r
- /* Validate the primary path. */\r
- p_port_cep = __format_path_av( p_cm_req->p_primary_path, &p_cep->av[0] );\r
- if( !p_port_cep )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Primary path unrealizable.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- p_cep->av[0].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;\r
-\r
- /* Make sure the paths will work on the desired QP. */\r
- if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid !=\r
- p_cm_req->h_qp->obj.p_ci_ca->verbs.guid )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Primary path not realizable on given QP.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
-\r
- *pp_port_cep = p_port_cep;\r
-\r
- /*\r
- * Store the PKEY so we can ensure that alternate paths are\r
- * on the same partition.\r
- */\r
- p_cep->pkey = p_cm_req->p_primary_path->pkey;\r
- \r
- p_cep->max_2pkt_life = ib_path_rec_pkt_life( p_cm_req->p_primary_path ) + 1;\r
-\r
- if( p_cm_req->p_alt_path )\r
- {\r
- /* MTUs must match since they are specified only once. */\r
- if( ib_path_rec_mtu( p_cm_req->p_primary_path ) !=\r
- ib_path_rec_mtu( p_cm_req->p_alt_path ) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Mismatched primary and alternate path MTUs.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- /* The PKEY must match too. */\r
- if( p_cm_req->p_alt_path->pkey != p_cm_req->p_primary_path->pkey )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Mismatched pimary and alternate PKEYs.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- p_port_cep =\r
- __format_path_av( p_cm_req->p_alt_path, &p_cep->av[1] );\r
- if( p_port_cep &&\r
- p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
- {\r
- /* Alternate path is not on same CA. */\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Alternate path unrealizable.\n") );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- p_cep->av[1].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;\r
-\r
- p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
- (ib_path_rec_pkt_life( p_cm_req->p_alt_path ) + 1) );\r
- }\r
- else\r
- {\r
- cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
- }\r
-\r
- p_cep->p_cid->modifier++;\r
- /*\r
- * We don't ever want a modifier of zero for the CID at index zero\r
- * since it would result in a total CID of zero.\r
- */\r
- if( !p_cep->cid && !p_cep->p_cid->modifier )\r
- p_cep->p_cid->modifier++;\r
-\r
- /* Store pertinent information in the connection. */\r
- p_cep->local_comm_id = p_cep->cid | (p_cep->p_cid->modifier << 24);\r
- p_cep->remote_comm_id = 0;\r
-\r
- /* Cache the local QPN. */\r
- p_cep->local_qpn = p_cm_req->h_qp->num;\r
- p_cep->remote_ca_guid = 0;\r
- p_cep->remote_qpn = 0;\r
-\r
- /* Retry timeout is remote CM response timeout plus 2 * packet life. */\r
- p_cep->retry_timeout = __calc_mad_timeout( p_cep->max_2pkt_life ) +\r
- __calc_mad_timeout( p_cm_req->remote_resp_timeout );\r
- \r
-\r
- /* Store the retry count. */\r
- p_cep->max_cm_retries = p_cm_req->max_cm_retries;\r
-\r
- /*\r
- * Clear the maximum packet lifetime, used to calculate timewait.\r
- * It will be set when we transition into the established state.\r
- */\r
- p_cep->timewait_time.QuadPart = 0;\r
-\r
- p_cep->rq_psn = p_cep->local_qpn;\r
-\r
- p_cep->rnr_nak_timeout = p_cm_req->rnr_nak_timeout;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_pre_req(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const ib_cm_req_t* const p_cm_req,\r
- OUT ib_qp_mod_t* const p_init )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_req );\r
- CL_ASSERT( p_init );\r
-\r
- /* TODO: Code P2P support. */\r
- if( p_cm_req->pfn_cm_req_cb )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_UNSUPPORTED;\r
- }\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REQ:\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Fall through. */\r
- case CEP_STATE_IDLE:\r
- status = __save_user_req( p_cep, p_cm_req, &p_port_cep );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status =\r
- ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_cep->p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_req( p_cep, p_cm_req );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid pdata length.\n") );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- break;\r
- }\r
-\r
- /* Format the INIT qp modify attributes. */\r
- p_init->req_state = IB_QPS_INIT;\r
- p_init->state.init.primary_port =\r
- p_cep->av[p_cep->idx_primary].attr.port_num;\r
- p_init->state.init.qkey = 0;\r
- p_init->state.init.pkey_index =\r
- p_cep->av[p_cep->idx_primary].pkey_index;\r
- p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;\r
-\r
- p_cep->state = CEP_STATE_PRE_REQ;\r
- break;\r
-\r
- case CEP_STATE_TIMEWAIT:\r
- status = IB_QP_IN_TIMEWAIT;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_send_req(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REQ:\r
- CL_ASSERT( p_cep->p_mad );\r
- p_port_cep = __get_cep_agent( p_cep );\r
- if( !p_port_cep )\r
- {\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->state = CEP_STATE_IDLE;\r
- status = IB_INVALID_SETTING;\r
- }\r
- else\r
- {\r
- status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );\r
-\r
- if( status == IB_SUCCESS )\r
- p_cep->state = CEP_STATE_REQ_SENT;\r
- else\r
- p_cep->state = CEP_STATE_IDLE;\r
- }\r
- p_cep->p_mad = NULL;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static void\r
-__save_user_rep(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_rep_t* const p_cm_rep )\r
-{\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Cache the local QPN. */\r
- p_cep->local_qpn = p_cm_rep->h_qp->num;\r
- p_cep->rq_psn = p_cep->local_qpn;\r
- p_cep->init_depth = p_cm_rep->init_depth;\r
-\r
- ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
- /* Check the CA's responder resource max and trim if necessary. */\r
- if( (p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res <\r
- p_cep->req_init_depth) )\r
- {\r
- /*\r
- * The CA cannot handle the requested responder resources.\r
- * Set the response to the CA's maximum.\r
- */\r
- p_cep->resp_res = \r
- p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res;\r
- }\r
- else\r
- {\r
- /* The CA supports the requested responder resources. */\r
- p_cep->resp_res = p_cep->req_init_depth;\r
- }\r
- ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
-\r
- p_cep->rnr_nak_timeout = p_cm_rep->rnr_nak_timeout;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__format_rep(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_rep_t* const p_cm_rep )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rep_t *p_rep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cep );\r
- CL_ASSERT( p_cm_rep );\r
- CL_ASSERT( p_cep->p_mad );\r
-\r
- /* Format the MAD header. */\r
- __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REP_ATTR_ID );\r
-\r
- /* Set the addressing information in the MAD. */\r
- __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] );\r
-\r
- p_rep = (mad_cm_rep_t*)p_cep->p_mad->p_mad_buf;\r
-\r
- p_rep->local_comm_id = p_cep->local_comm_id;\r
- p_rep->remote_comm_id = p_cep->remote_comm_id;\r
- conn_rep_set_lcl_qpn( p_cep->local_qpn, p_rep );\r
- conn_rep_set_starting_psn( p_cep->rq_psn, p_rep );\r
-\r
- if( p_cm_rep->failover_accepted != IB_FAILOVER_ACCEPT_SUCCESS )\r
- {\r
- /*\r
- * Failover rejected - clear the alternate AV information.\r
- * Note that at this point, the alternate is always at index 1.\r
- */\r
- cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
- }\r
- else if( !p_cep->av[1].port_guid )\r
- {\r
- /*\r
- * Always reject alternate path if it's zero. We might\r
- * have cleared the AV because it was unrealizable when\r
- * processing the REQ.\r
- */\r
- conn_rep_set_failover( IB_FAILOVER_ACCEPT_ERROR, p_rep );\r
- }\r
- else\r
- {\r
- conn_rep_set_failover( p_cm_rep->failover_accepted, p_rep );\r
- }\r
-\r
- p_rep->resp_resources = p_cep->resp_res;\r
-\r
- ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
- conn_rep_set_target_ack_delay(\r
- p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay, p_rep );\r
- ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca );\r
-\r
- p_rep->initiator_depth = p_cep->init_depth;\r
-\r
- conn_rep_set_e2e_flow_ctl( p_cm_rep->flow_ctrl, p_rep );\r
-\r
- conn_rep_set_rnr_retry_cnt(\r
- (uint8_t)(p_cm_rep->rnr_retry_cnt & 0x07), p_rep );\r
-\r
- /* Local CA guid should have been set when processing the received REQ. */\r
- CL_ASSERT( p_cep->local_ca_guid );\r
- p_rep->local_ca_guid = p_cep->local_ca_guid;\r
-\r
- status = conn_rep_set_pdata(\r
- p_cm_rep->p_rep_pdata, p_cm_rep->rep_length, p_rep );\r
-\r
- conn_rep_clr_rsvd_fields( p_rep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-__al_cep_pre_rep(\r
- IN kcep_t *p_cep,\r
- IN void* __ptr64 context,\r
- IN const ib_cm_rep_t* const p_cm_rep,\r
- OUT ib_qp_mod_t* const p_init )\r
-{\r
- ib_api_status_t status;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( p_cm_rep );\r
- CL_ASSERT( p_init );\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Fall through. */\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- CL_ASSERT( !p_cep->p_mad );\r
- status =\r
- __cep_get_mad( p_cep, CM_REP_ATTR_ID, &p_port_cep, &p_cep->p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- __save_user_rep( p_cep, p_cm_rep );\r
-\r
- status = __format_rep( p_cep, p_cm_rep );\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- break;\r
- }\r
-\r
- /* Format the INIT qp modify attributes. */\r
- p_init->req_state = IB_QPS_INIT;\r
- p_init->state.init.primary_port =\r
- p_cep->av[p_cep->idx_primary].attr.port_num;\r
- p_init->state.init.qkey = 0;\r
- p_init->state.init.pkey_index =\r
- p_cep->av[p_cep->idx_primary].pkey_index;\r
- p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE;\r
-\r
- p_cep->context = context;\r
-\r
- /* Just OR in the PREP bit into the state. */\r
- p_cep->state |= CEP_STATE_PREP;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_pre_rep(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN void* __ptr64 context,\r
- IN const ib_cm_rep_t* const p_cm_rep,\r
- OUT ib_qp_mod_t* const p_init )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_rep );\r
- CL_ASSERT( p_init );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- status = __al_cep_pre_rep( p_cep, context, p_cm_rep, p_init );\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_pre_rep_ex(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN al_pfn_cep_cb_t pfn_cb,\r
- IN void* __ptr64 context,\r
- IN const ib_cm_rep_t* const p_cm_rep,\r
- OUT ib_qp_mod_t* const p_init )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_rep );\r
- CL_ASSERT( p_init );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- status = __al_cep_pre_rep( p_cep, context, p_cm_rep, p_init );\r
-\r
- if (status == IB_SUCCESS)\r
- {\r
- p_cep->pfn_cb = pfn_cb;\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-ib_api_status_t\r
-al_cep_send_rep(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- CL_ASSERT( p_cep->p_mad );\r
- p_port_cep = __get_cep_agent( p_cep );\r
- if( !p_port_cep )\r
- {\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->state = CEP_STATE_IDLE;\r
- status = IB_INSUFFICIENT_RESOURCES;\r
- }\r
- else\r
- {\r
- status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_cep->state = CEP_STATE_REP_SENT;\r
- }\r
- else\r
- {\r
- __remove_cep( p_cep );\r
- p_cep->state = CEP_STATE_IDLE;\r
- }\r
- }\r
- p_cep->p_mad = NULL;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static inline ib_api_status_t\r
-__format_rtu(\r
- IN kcep_t* const p_cep, \r
- IN const uint8_t* p_pdata OPTIONAL,\r
- IN uint8_t pdata_len,\r
- IN OUT ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_rtu_t *p_rtu;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;\r
-\r
- p_rtu->local_comm_id = p_cep->local_comm_id;\r
- p_rtu->remote_comm_id = p_cep->remote_comm_id;\r
-\r
- /* copy optional data */\r
- status = conn_rtu_set_pdata( p_pdata, pdata_len, p_rtu );\r
-\r
- /* Store the RTU MAD so we can repeat it if we get a repeated REP. */\r
- if( status == IB_SUCCESS )\r
- p_cep->mads.rtu = *p_rtu;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_rtu(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const uint8_t* p_pdata OPTIONAL,\r
- IN uint8_t pdata_len )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- status = __cep_get_mad( p_cep, CM_RTU_ATTR_ID, &p_port_cep, &p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_rtu( p_cep, p_pdata, pdata_len, p_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_mad );\r
- break;\r
- }\r
-\r
- /* Update the timewait time. */\r
- __calc_timewait( p_cep );\r
-\r
- p_cep->state = CEP_STATE_ESTABLISHED;\r
-\r
- __cep_send_mad( p_port_cep, p_mad );\r
- /* Send failures will get another chance if we receive a repeated REP. */\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_rej(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN ib_rej_status_t rej_status,\r
- IN const uint8_t* const p_ari,\r
- IN uint8_t ari_len,\r
- IN const uint8_t* const p_pdata,\r
- IN uint8_t pdata_len )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- status = __do_cep_rej(\r
- p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );\r
- __remove_cep( p_cep );\r
- p_cep->state = CEP_STATE_IDLE;\r
- break;\r
-\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- status = __do_cep_rej(\r
- p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len );\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
- __insert_timewait( p_cep );\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- }\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__format_mra(\r
- IN kcep_t* const p_cep,\r
- IN const uint8_t msg_mraed,\r
- IN const ib_cm_mra_t* const p_cm_mra,\r
- IN OUT ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_mra_t *p_mra;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;\r
-\r
- conn_mra_set_msg_mraed( msg_mraed, p_mra );\r
-\r
- p_mra->local_comm_id = p_cep->local_comm_id;\r
- p_mra->remote_comm_id = p_cep->remote_comm_id;\r
-\r
- conn_mra_set_svc_timeout( p_cm_mra->svc_timeout, p_mra );\r
- status = conn_mra_set_pdata(\r
- p_cm_mra->p_mra_pdata, p_cm_mra->mra_length, p_mra );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
- }\r
- conn_mra_clr_rsvd_fields( p_mra );\r
-\r
- /* Save the MRA so we can repeat it if we get a repeated message. */\r
- p_cep->mads.mra = *p_mra;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_mra(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const ib_cm_mra_t* const p_cm_mra )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
- uint8_t msg_mraed;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_mra );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_PRE_REP:\r
- msg_mraed = 0;\r
- break;\r
-\r
- case CEP_STATE_REP_RCVD:\r
- msg_mraed = 1;\r
- break;\r
-\r
- case CEP_STATE_PRE_APR:\r
- case CEP_STATE_LAP_RCVD:\r
- msg_mraed = 2;\r
- break;\r
-\r
- default:\r
- status = IB_INVALID_STATE;\r
- goto done;\r
- }\r
-\r
- status = __cep_get_mad( p_cep, CM_MRA_ATTR_ID, &p_port_cep, &p_mad );\r
- if( status != IB_SUCCESS )\r
- goto done;\r
-\r
- status = __format_mra( p_cep, msg_mraed, p_cm_mra, p_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_mad );\r
- goto done;\r
- }\r
-\r
- p_cep->state |= CEP_STATE_MRA;\r
-\r
- __cep_send_mad( p_port_cep, p_mad );\r
- status = IB_SUCCESS;\r
-\r
-done:\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-\r
-static ib_api_status_t\r
-__format_lap(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_lap_t* const p_cm_lap,\r
- IN OUT ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_lap_t *p_lap;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_LAP_ATTR_ID );\r
-\r
- __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );\r
-\r
- p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf;\r
-\r
- p_lap->alternate_path.local_lid = p_cm_lap->p_alt_path->slid;\r
- p_lap->alternate_path.remote_lid = p_cm_lap->p_alt_path->dlid;\r
- p_lap->alternate_path.local_gid = p_cm_lap->p_alt_path->sgid;\r
- p_lap->alternate_path.remote_gid = p_cm_lap->p_alt_path->dgid;\r
-\r
- /* Set Flow Label and Packet Rate */\r
- conn_lap_path_set_flow_lbl(\r
- ib_path_rec_flow_lbl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
- conn_lap_path_set_tclass(\r
- p_cm_lap->p_alt_path->tclass, &p_lap->alternate_path );\r
-\r
- p_lap->alternate_path.hop_limit =\r
- ib_path_rec_hop_limit( p_cm_lap->p_alt_path );\r
- conn_lap_path_set_pkt_rate(\r
- ib_path_rec_rate( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
-\r
- /* Set SL and Subnet Local */\r
- conn_lap_path_set_svc_lvl(\r
- ib_path_rec_sl( p_cm_lap->p_alt_path ), &p_lap->alternate_path );\r
- conn_lap_path_set_subn_lcl(\r
- ib_gid_is_link_local( &p_cm_lap->p_alt_path->dgid ),\r
- &p_lap->alternate_path );\r
-\r
- conn_lap_path_set_lcl_ack_timeout(\r
- calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1,\r
- p_cep->local_ack_delay), &p_lap->alternate_path );\r
-\r
- conn_lap_path_clr_rsvd_fields( &p_lap->alternate_path );\r
-\r
- p_lap->local_comm_id = p_cep->local_comm_id;\r
- p_lap->remote_comm_id = p_cep->remote_comm_id;\r
- conn_lap_set_remote_qpn( p_cep->remote_qpn, p_lap );\r
- conn_lap_set_resp_timeout( p_cm_lap->remote_resp_timeout, p_lap );\r
-\r
- status = conn_lap_set_pdata(\r
- p_cm_lap->p_lap_pdata, p_cm_lap->lap_length, p_lap );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("lap pdata invalid.\n") );\r
- return status;\r
- }\r
-\r
- conn_lap_clr_rsvd_fields( p_lap );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_lap(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const ib_cm_lap_t* const p_cm_lap )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_lap );\r
- CL_ASSERT( p_cm_lap->p_alt_path );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_ESTABLISHED:\r
- if( !p_cep->was_active )\r
- {\r
- /* Only the side that took the active role can initialte a LAP. */\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Only the active side of a connection can initiate a LAP.\n") );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
-\r
- /*\r
- * Format the AV information - store in the temporary location until we\r
- * get the APR indicating acceptance.\r
- */\r
- p_port_cep = __format_path_av( p_cm_lap->p_alt_path, &p_cep->alt_av );\r
- if( !p_port_cep )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Alternate path invalid!\n") );\r
- status = IB_INVALID_SETTING;\r
- break;\r
- }\r
-\r
- p_cep->alt_av.attr.conn.seq_err_retry_cnt =\r
- p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;\r
- p_cep->alt_av.attr.conn.rnr_retry_cnt =\r
- p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;\r
-\r
- if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Alternate CA GUID different from current!\n") );\r
- status = IB_INVALID_SETTING;\r
- break;\r
- }\r
-\r
- /* Store the alternate path info temporarilly. */\r
- p_cep->alt_2pkt_life = ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1;\r
-\r
- status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_lap( p_cep, p_cm_lap, p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __cep_send_retry( p_port_cep, p_cep, p_mad );\r
- if( status == IB_SUCCESS )\r
- p_cep->state = CEP_STATE_LAP_SENT;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__format_apr(\r
- IN kcep_t* const p_cep,\r
- IN const ib_cm_apr_t* const p_cm_apr,\r
- IN OUT ib_mad_element_t* const p_mad )\r
-{\r
- ib_api_status_t status;\r
- mad_cm_apr_t *p_apr;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf;\r
-\r
- p_apr->local_comm_id = p_cep->local_comm_id;\r
- p_apr->remote_comm_id = p_cep->remote_comm_id;\r
- p_apr->status = p_cm_apr->apr_status;\r
-\r
- status = conn_apr_set_apr_info( p_cm_apr->p_info->data,\r
- p_cm_apr->info_length, p_apr );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("apr_info invalid\n") );\r
- return status;\r
- }\r
-\r
- status = conn_apr_set_pdata( p_cm_apr->p_apr_pdata,\r
- p_cm_apr->apr_length, p_apr );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("apr pdata invalid\n") );\r
- return status;\r
- }\r
-\r
- conn_apr_clr_rsvd_fields( p_apr );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_pre_apr(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const ib_cm_apr_t* const p_cm_apr,\r
- OUT ib_qp_mod_t* const p_apr )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_apr );\r
- CL_ASSERT( p_apr );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_APR:\r
- case CEP_STATE_PRE_APR_MRA_SENT:\r
- CL_ASSERT( p_cep->p_mad );\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- /* Fall through. */\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- CL_ASSERT( !p_cep->p_mad );\r
- status = __cep_get_mad( p_cep, CM_APR_ATTR_ID, &p_port_cep, &p_cep->p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_apr( p_cep, p_cm_apr, p_cep->p_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_cep->p_mad );\r
- p_cep->p_mad = NULL;\r
- break;\r
- }\r
-\r
- if( !p_cm_apr->apr_status )\r
- {\r
- /*\r
- * Copy the temporary AV and port GUID information into\r
- * the alternate path.\r
- */\r
- p_cep->av[((p_cep->idx_primary + 1) & 0x1)] = p_cep->alt_av;\r
-\r
- /* Update our maximum packet lifetime. */\r
- p_cep->max_2pkt_life =\r
- max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life );\r
-\r
- /* Update our timewait time. */\r
- __calc_timewait( p_cep );\r
-\r
- /* Fill in the QP attributes. */\r
- cl_memclr( p_apr, sizeof(ib_qp_mod_t) );\r
- p_apr->req_state = IB_QPS_RTS;\r
- p_apr->state.rts.opts =\r
- IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
- p_apr->state.rts.alternate_av = p_cep->alt_av.attr;\r
- p_apr->state.rts.apm_state = IB_APM_REARM;\r
- }\r
-\r
- p_cep->state |= CEP_STATE_PREP;\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_send_apr(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_APR:\r
- case CEP_STATE_PRE_APR_MRA_SENT:\r
- CL_ASSERT( p_cep->p_mad );\r
- p_port_cep = __get_cep_agent( p_cep );\r
- if( !p_port_cep )\r
- {\r
- ib_put_mad( p_cep->p_mad );\r
- status = IB_INSUFFICIENT_RESOURCES;\r
- }\r
- else\r
- {\r
- p_cep->state = CEP_STATE_ESTABLISHED;\r
-\r
- __cep_send_mad( p_port_cep, p_cep->p_mad );\r
- status = IB_SUCCESS;\r
- }\r
- p_cep->p_mad = NULL;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_dreq(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const uint8_t* const p_pdata,\r
- IN const uint8_t pdata_len )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_port_cep, &p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("__format_dreq returned %s.\n", ib_get_err_str( status )) );\r
- break;\r
- }\r
-\r
- if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS )\r
- {\r
- p_cep->state = CEP_STATE_DREQ_SENT;\r
- }\r
- else\r
- {\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
- __insert_timewait( p_cep );\r
- }\r
-\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_drep(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN const ib_cm_drep_t* const p_cm_drep )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- cep_agent_t *p_port_cep;\r
- ib_mad_element_t *p_mad;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_cm_drep );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_DREQ_RCVD:\r
- status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_port_cep, &p_mad );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- status = __format_drep( p_cep, p_cm_drep->p_drep_pdata,\r
- p_cm_drep->drep_length, (mad_cm_drep_t*)p_mad->p_mad_buf );\r
- if( status != IB_SUCCESS )\r
- break;\r
-\r
- __cep_send_mad( p_port_cep, p_mad );\r
- p_cep->state = CEP_STATE_TIMEWAIT;\r
- __insert_timewait( p_cep );\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_migrate(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_ESTABLISHED:\r
- case CEP_STATE_LAP_SENT:\r
- case CEP_STATE_LAP_RCVD:\r
- case CEP_STATE_LAP_MRA_SENT:\r
- case CEP_STATE_LAP_MRA_RCVD:\r
- if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
- {\r
- p_cep->idx_primary++;\r
- p_cep->idx_primary &= 0x1;\r
- status = IB_SUCCESS;\r
- break;\r
- }\r
-\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("No alternate path avaialble.\n") );\r
-\r
- /* Fall through. */\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_established(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- CL_ASSERT( p_cep->p_send_mad );\r
- ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
- p_cep->p_send_mad = NULL;\r
- p_cep->state = CEP_STATE_ESTABLISHED;\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_get_rtr_attr(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- OUT ib_qp_mod_t* const p_rtr )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_rtr );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- case CEP_STATE_ESTABLISHED:\r
- cl_memclr( p_rtr, sizeof(ib_qp_mod_t) );\r
- p_rtr->req_state = IB_QPS_RTR;\r
-\r
- /* Required params. */\r
- p_rtr->state.rtr.rq_psn = p_cep->rq_psn;\r
- p_rtr->state.rtr.dest_qp = p_cep->remote_qpn;\r
- p_rtr->state.rtr.primary_av = p_cep->av[p_cep->idx_primary].attr;\r
- p_rtr->state.rtr.resp_res = p_cep->resp_res;\r
- p_rtr->state.rtr.rnr_nak_timeout = p_cep->rnr_nak_timeout;\r
-\r
- /* Optional params. */\r
- p_rtr->state.rtr.opts = 0;\r
- if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
- {\r
- p_rtr->state.rtr.opts |= IB_MOD_QP_ALTERNATE_AV;\r
- p_rtr->state.rtr.alternate_av =\r
- p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
- }\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_get_rts_attr(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- OUT ib_qp_mod_t* const p_rts )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_rts );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- switch( p_cep->state )\r
- {\r
- case CEP_STATE_REQ_SENT:\r
- case CEP_STATE_REQ_RCVD:\r
- case CEP_STATE_REQ_MRA_SENT:\r
- case CEP_STATE_REQ_MRA_RCVD:\r
- case CEP_STATE_REP_SENT:\r
- case CEP_STATE_REP_RCVD:\r
- case CEP_STATE_REP_MRA_SENT:\r
- case CEP_STATE_REP_MRA_RCVD:\r
- case CEP_STATE_PRE_REP:\r
- case CEP_STATE_PRE_REP_MRA_SENT:\r
- case CEP_STATE_ESTABLISHED:\r
- cl_memclr( p_rts, sizeof(ib_qp_mod_t) );\r
- p_rts->req_state = IB_QPS_RTS;\r
-\r
- /* Required params. */\r
- p_rts->state.rts.sq_psn = p_cep->sq_psn;\r
- p_rts->state.rts.retry_cnt =\r
- p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt;\r
- p_rts->state.rts.rnr_retry_cnt =\r
- p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt;\r
- p_rts->state.rts.local_ack_timeout =\r
- p_cep->av[p_cep->idx_primary].attr.conn.local_ack_timeout;\r
- p_rts->state.rts.init_depth = p_cep->init_depth;\r
-\r
- /* Optional params. */\r
- p_rts->state.rts.opts = 0;\r
- if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid )\r
- {\r
- p_rts->state.rts.opts =\r
- IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE;\r
- p_rts->state.rts.apm_state = IB_APM_REARM;\r
- p_rts->state.rts.alternate_av =\r
- p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr;\r
- }\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,\r
- ("Invalid state: %d\n", p_cep->state) );\r
- status = IB_INVALID_STATE;\r
- break;\r
- }\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_get_timewait(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- OUT uint64_t* const p_timewait_us )\r
-{\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- *p_timewait_us = p_cep->timewait_time.QuadPart / 10;\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_poll(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- OUT void* __ptr64 * p_context,\r
- OUT net32_t* const p_new_cid,\r
- OUT ib_mad_element_t** const pp_mad )\r
-{\r
- ib_api_status_t status;\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_new_cid );\r
- CL_ASSERT( pp_mad );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- *p_context = p_cep->context;\r
-\r
- if( !p_cep->p_mad_head )\r
- {\r
- p_cep->signalled = FALSE;\r
- status = IB_NOT_DONE;\r
- goto done;\r
- }\r
-\r
- /* Set the MAD. */\r
- *pp_mad = p_cep->p_mad_head;\r
- p_cep->p_mad_head = p_cep->p_mad_head->p_next;\r
- (*pp_mad)->p_next = NULL;\r
-\r
- /* We're done with the input CEP. Reuse the variable */\r
- p_cep = (kcep_t* __ptr64)(*pp_mad)->send_context1;\r
- if( p_cep )\r
- *p_new_cid = p_cep->cid;\r
- else\r
- *p_new_cid = AL_INVALID_CID;\r
-\r
- status = IB_SUCCESS;\r
-\r
-done:\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return status;\r
-}\r
-\r
-\r
-static void\r
-__cep_cancel_irp(\r
- IN DEVICE_OBJECT* p_dev_obj,\r
- IN IRP* p_irp )\r
-{\r
- net32_t cid;\r
- ib_al_handle_t h_al;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- kcep_t *p_cep;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
- CL_ASSERT( p_irp );\r
-\r
- cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];\r
- h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1];\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( p_cep )\r
- __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-NTSTATUS\r
-al_cep_queue_irp(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN IRP* const p_irp )\r
-{\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
- CL_ASSERT( p_irp );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return STATUS_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * Store the CID an AL handle in the IRP's driver context\r
- * so we can cancel it.\r
- */\r
- p_irp->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;\r
- p_irp->Tail.Overlay.DriverContext[1] = (void*)h_al;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, __cep_cancel_irp );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( p_irp );\r
-\r
- /* Always dequeue and complete whatever IRP is there. */\r
- __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT );\r
-\r
- InterlockedExchangePointer( &p_cep->p_irp, p_irp );\r
-\r
- /* Complete the IRP if there are MADs to be reaped. */\r
- if( p_cep->p_mad_head )\r
- __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return STATUS_PENDING;\r
-}\r
-\r
-\r
-void\r
-al_cep_cleanup_al(\r
- IN const ib_al_handle_t h_al )\r
-{\r
- cl_list_item_t *p_item;\r
- net32_t cid;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Destroy all CEPs associated with the input instance of AL. */\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
- for( p_item = cl_qlist_head( &h_al->cep_list );\r
- p_item != cl_qlist_end( &h_al->cep_list );\r
- p_item = cl_qlist_head( &h_al->cep_list ) )\r
- {\r
- /*\r
- * Note that we don't walk the list - we can't hold the AL\r
- * lock when cleaning up its CEPs because the cleanup path\r
- * takes the CEP's lock. We always want to take the CEP\r
- * before the AL lock to prevent any possibilities of deadlock.\r
- *\r
- * So we just get the CID, and then release the AL lock and try to\r
- * destroy. This should unbind the CEP from the AL instance and\r
- * remove it from the list, allowing the next CEP to be cleaned up\r
- * in the next pass through.\r
- */\r
- cid = PARENT_STRUCT( p_item, kcep_t, al_item )->cid;\r
- cl_spinlock_release( &h_al->obj.lock );\r
- al_destroy_cep( h_al, cid, NULL );\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
- }\r
- cl_spinlock_release( &h_al->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-static void\r
-__cep_cancel_ndi_irp(\r
- IN DEVICE_OBJECT* p_dev_obj,\r
- IN IRP* p_irp )\r
-{\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
- CL_ASSERT( p_irp );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- RemoveEntryList( &p_irp->Tail.Overlay.ListEntry );\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
- \r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = STATUS_CANCELLED;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-NTSTATUS\r
-al_cep_get_cid(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t const cid,\r
- IN PIRP p_irp\r
- )\r
-{\r
- kcep_t *p_cep, *p_new_cep;\r
- NTSTATUS nt_status;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- ib_mad_element_t* p_mad = NULL;\r
- mad_cm_req_t* p_req;\r
-\r
- AL_ENTER( AL_DBG_NDI );\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
-\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- p_irp->IoStatus.Information = 0;\r
- p_irp->IoStatus.Status = STATUS_INVALID_PARAMETER;\r
- IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT );\r
- nt_status = STATUS_EVENT_DONE;\r
- goto exit;\r
- }\r
-\r
- if( !p_cep->p_mad_head )\r
- { /* no pending MADs - queue the IRP */ \r
- p_cep->signalled = FALSE;\r
- InsertTailList( &p_cep->irp_que, &p_irp->Tail.Overlay.ListEntry );\r
- p_irp->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid;\r
- p_irp->Tail.Overlay.DriverContext[1] = (void*)h_al;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, __cep_cancel_ndi_irp );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( p_irp );\r
- nt_status = STATUS_PENDING;\r
- goto exit;\r
- }\r
-\r
- /* Set the MAD. */\r
- p_mad = p_cep->p_mad_head;\r
- p_cep->p_mad_head = p_mad->p_next;\r
- p_mad->p_next = NULL;\r
-\r
- /* store REQ private data */\r
- p_req = (mad_cm_req_t*)ib_get_mad_buf( p_mad );\r
- p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;\r
- __cep_set_pdata( h_al, p_new_cep->cid,\r
- sizeof(p_req->pdata), (uint8_t*)p_req->pdata );\r
- AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,\r
- ("set %d of REQ pdata to CEP with cid %d, h_al %p\n", \r
- sizeof(p_req->pdata), p_new_cep->cid, h_al ));\r
-\r
- /* complete the IRP */\r
- __complete_ndi_irp( p_irp, p_mad );\r
- nt_status = STATUS_EVENT_DONE;\r
- \r
-exit:\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_NDI );\r
- return nt_status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_cep_get_pdata(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN OUT uint8_t *p_psize,\r
- OUT uint8_t* pdata )\r
-{\r
- kcep_t *p_cep;\r
- KLOCK_QUEUE_HANDLE hdl;\r
- uint8_t remainder;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- p_cep = __lookup_cep( h_al, cid );\r
- if( !p_cep )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, \r
- ("CEP not found for cid %d, h_al %p\n", cid, h_al ));\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- if ( *p_psize < p_cep->psize )\r
- {\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, \r
- ("Insufficient size: *p_psize %d, max %d, cid %d, h_al %p\n", \r
- *p_psize, p_cep->psize, cid, h_al ));\r
- return IB_INVALID_PARAMETER;\r
- }\r
- \r
- memcpy( pdata, p_cep->pdata, p_cep->psize );\r
- remainder = *p_psize - p_cep->psize;\r
- if (remainder)\r
- cl_memclr( &pdata[p_cep->psize], remainder );\r
- *p_psize = p_cep->psize;\r
- if ( !*p_psize )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, \r
- ("p_cep->psize is zero for cid %d, h_al %p\n", cid, h_al ));\r
- }\r
-\r
- AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CM ,\r
- ("al_cep_get_pdata: get %d of pdata from CEP with cid %d, h_al %p, context %p \n", \r
- p_cep->psize, cid, h_al, p_cep->context ));\r
-\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
-\r
-ib_api_status_t\r
-al_cep_set_pdata(\r
- IN ib_al_handle_t h_al,\r
- IN net32_t cid,\r
- IN uint8_t psize,\r
- IN uint8_t* pdata )\r
-{\r
- ib_api_status_t status;\r
- KLOCK_QUEUE_HANDLE hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- CL_ASSERT( h_al );\r
-\r
- KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );\r
- status = __cep_set_pdata( h_al, cid, psize, pdata );\r
- KeReleaseInStackQueuedSpinLock( &hdl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return IB_SUCCESS;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_dev.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-#include <iba/ib_al.h>\r
-#include <complib/cl_qmap.h>\r
-#include <complib/cl_memory.h>\r
-#include <complib/cl_qpool.h>\r
-#include <complib/cl_passivelock.h>\r
-#include <complib/cl_vector.h>\r
-#include <complib/cl_spinlock.h>\r
-\r
-#include "al.h"\r
-#include "al_ca.h"\r
-#include "al_common.h"\r
-#include "al_cq.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_dev.tmh"\r
-#endif\r
-#include "al_dev.h"\r
-#include "al_qp.h"\r
-#include "al_mgr.h"\r
-#include "al_proxy.h"\r
-\r
-\r
-\r
-static cl_status_t\r
-__proxy_reg_pnp(\r
- IN al_dev_open_context_t *p_context );\r
-\r
-static void\r
-__proxy_cancel_cblists(\r
- IN al_dev_open_context_t *p_context );\r
-\r
-\r
-\r
-\r
-static void\r
-__construct_open_context(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- cl_event_construct( &p_context->close_event );\r
-\r
- cl_qpool_construct( &p_context->cb_pool );\r
- cl_spinlock_construct( &p_context->cb_pool_lock );\r
-\r
- cl_qlist_init( &p_context->cm_cb_list );\r
- cl_qlist_init( &p_context->comp_cb_list );\r
- cl_qlist_init( &p_context->misc_cb_list );\r
- cl_spinlock_construct( &p_context->cb_lock );\r
- cl_mutex_construct( &p_context->pnp_mutex );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Initialize all objects used by the per client open context.\r
- */\r
-static cl_status_t\r
-__init_open_context(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- cl_status_t cl_status;\r
-\r
- cl_status = cl_event_init( &p_context->close_event, FALSE );\r
- if( cl_status != CL_SUCCESS )\r
- return cl_status;\r
-\r
- /* Allocate pool for storing callback info or requests. */\r
- cl_status = cl_qpool_init( &p_context->cb_pool,\r
- AL_CB_POOL_START_SIZE, 0, AL_CB_POOL_GROW_SIZE,\r
- sizeof(al_proxy_cb_info_t), NULL, NULL, NULL );\r
- if( cl_status != CL_SUCCESS )\r
- return cl_status;\r
-\r
- cl_status = cl_spinlock_init( &p_context->cb_pool_lock );\r
- if( cl_status != CL_SUCCESS )\r
- return cl_status;\r
-\r
- cl_status = cl_spinlock_init( &p_context->cb_lock );\r
- if( cl_status != CL_SUCCESS )\r
- return cl_status;\r
-\r
- cl_status = cl_mutex_init( &p_context->pnp_mutex );\r
- if( cl_status != CL_SUCCESS )\r
- return cl_status;\r
-\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static void\r
-__destroy_open_context(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- cl_event_destroy( &p_context->close_event );\r
-\r
- cl_qpool_destroy( &p_context->cb_pool );\r
- cl_spinlock_destroy( &p_context->cb_pool_lock );\r
- cl_spinlock_destroy( &p_context->cb_lock );\r
- cl_mutex_destroy( &p_context->pnp_mutex );\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-al_dev_open(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- ULONG *p_ver;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
-\r
- p_ver = cl_ioctl_in_buf( h_ioctl );\r
-\r
- if( p_io_stack->FileObject->FsContext ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(ULONG) ||\r
- !p_ver ||\r
- cl_ioctl_out_size( h_ioctl ) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("context already exists or bad parameters.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- if( *p_ver != AL_IOCTL_VERSION )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Unsupported client version: %d\n", *p_ver) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Allocate the client's context structure. */\r
- p_context = (al_dev_open_context_t*)\r
- cl_zalloc( sizeof(al_dev_open_context_t) );\r
- if( !p_context )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_malloc( %d ) failed.\n", sizeof(al_dev_open_context_t)) );\r
- return CL_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Construct the open context to allow destruction. */\r
- __construct_open_context( p_context );\r
-\r
- /* Initialize the open context elements. */\r
- cl_status = __init_open_context( p_context );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- __destroy_open_context( p_context );\r
- return cl_status;\r
- }\r
-\r
- /* Open an internal AL instance for this process. */\r
- status = ib_open_al( &p_context->h_al );\r
- if( status == IB_SUCCESS )\r
- {\r
- /* Register for PnP events. */\r
- status = __proxy_reg_pnp( p_context );\r
- }\r
-\r
- /* Make sure that we were able to open AL and register for PnP. */\r
- if( status == IB_SUCCESS )\r
- {\r
- /*\r
- * Store the reference from the AL instance back to this\r
- * open context. This allows using the user-mode context\r
- * for resource creation.\r
- */\r
- p_context->h_al->p_context = p_context;\r
- /* We successfully opened the device. */\r
- p_io_stack->FileObject->FsContext = p_context;\r
- }\r
- else\r
- {\r
- __destroy_open_context( p_context );\r
- cl_status = CL_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * To be called by al_dev_open(). This will register for PnP events\r
- * on behalf of user process (UAL). It uses the implicit global\r
- * al instance created by AL manager. PnP events are propagated\r
- * to UAL automatically from the time AL device is open till the\r
- * process exits.\r
- */\r
-static ib_api_status_t\r
-__proxy_reg_pnp(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- ib_pnp_req_t pnp_req;\r
- ib_pnp_handle_t h_pnp;\r
- ib_api_status_t status;\r
- \r
- /* Register for PnP events. */\r
- cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );\r
- pnp_req.pnp_class = IB_PNP_CA | IB_PNP_FLAG_REG_COMPLETE;\r
- pnp_req.pnp_context = p_context;\r
- pnp_req.pfn_pnp_cb = proxy_pnp_ca_cb;\r
-\r
- /* No need to track the registration. We'll deregister when closing AL. */\r
- status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp );\r
- if( status != IB_SUCCESS )\r
- return status;\r
- \r
- /* Register for port events. */\r
- pnp_req.pfn_pnp_cb = proxy_pnp_port_cb;\r
- pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_COMPLETE;\r
- status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp );\r
- \r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Cleanup the handle map. Remove all mappings. Perform all necessary\r
- * operations.\r
- */\r
-static void\r
-__proxy_cleanup_map(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- al_handle_t *p_h;\r
- size_t i;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- for( i = 0; i < cl_vector_get_size( &p_context->h_al->hdl_vector ); i++ )\r
- {\r
- p_h = (al_handle_t*)\r
- cl_vector_get_ptr( &p_context->h_al->hdl_vector, i );\r
-\r
- switch( AL_BASE_TYPE( p_h->type ) )\r
- {\r
- /* Return any MADs not reported to the user. */\r
- case AL_OBJ_TYPE_H_MAD:\r
- ib_put_mad( (ib_mad_element_t*)p_h->p_obj );\r
- al_hdl_free( p_context->h_al, i );\r
- break;\r
-\r
- case AL_OBJ_TYPE_H_CA_ATTR:\r
- /* Release a saved CA attribute. */\r
- cl_free( p_h->p_obj );\r
- al_hdl_free( p_context->h_al, i );\r
- break;\r
-\r
- case AL_OBJ_TYPE_H_SA_REQ:\r
- al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj );\r
- break;\r
-\r
- case AL_OBJ_TYPE_H_PNP_EVENT:\r
- cl_event_signal( &((proxy_pnp_evt_t*)p_h->p_obj)->event );\r
- break;\r
-\r
- default:\r
- /* Nothing else to do for other handle types. */\r
- break;\r
- }\r
- }\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
-}\r
-\r
-\r
-cl_status_t\r
-al_dev_close(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- al_dev_open_context_t *p_context;\r
- IO_STACK_LOCATION *p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
-\r
- /* Determine if the client closed the al_handle. */\r
- p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;\r
- if( !p_context )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Client closed with a null open context .\n") );\r
- return CL_SUCCESS;\r
- }\r
- if( p_io_stack->FileObject->FsContext2 )\r
- {\r
- /* Not the main file object - ignore. */\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* Mark that we're closing this device. */\r
- p_context->closing = TRUE;\r
-\r
- /* Flush any pending IOCTLs in case user-mode threads died on us. */\r
- if( p_context->h_cm_ioctl )\r
- al_dev_cancel_ioctl( p_context->h_cm_ioctl );\r
- if( p_context->h_comp_ioctl )\r
- al_dev_cancel_ioctl( p_context->h_comp_ioctl );\r
- if( p_context->h_misc_ioctl )\r
- al_dev_cancel_ioctl( p_context->h_misc_ioctl );\r
-\r
- while( p_context->ref_cnt )\r
- {\r
-#ifdef _DEBUG_\r
- cl_status_t cl_status;\r
-\r
- cl_status = cl_event_wait_on( &p_context->close_event, 1000, FALSE );\r
- ASSERT( cl_status == IB_SUCCESS );\r
- if( cl_status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Waiting on ref_cnt timed out!\n") );\r
- break;\r
- }\r
-#else\r
- cl_event_wait_on( &p_context->close_event, EVENT_NO_TIMEOUT, FALSE );\r
-#endif\r
- }\r
-\r
- /* Cleanup any leftover callback resources. */\r
- __proxy_cancel_cblists( p_context );\r
-\r
- /* Close the AL instance for this process. */\r
- if( p_context->h_al )\r
- {\r
- /* Cleanup all user to kernel handle mappings. */\r
- __proxy_cleanup_map( p_context );\r
-\r
- ib_close_al( p_context->h_al );\r
- p_context->h_al = NULL;\r
- }\r
-\r
- /* Destroy the open context now. */\r
- __destroy_open_context( p_context );\r
- cl_free( p_context );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Remove all callbacks on the given callback queue and return them to\r
- * the callback pool.\r
- */\r
-static void\r
-__proxy_dq_cblist(\r
- IN al_dev_open_context_t *p_context,\r
- IN cl_qlist_t *p_cblist )\r
-{\r
- cl_list_item_t *p_list_item;\r
- al_proxy_cb_info_t *p_cb_info;\r
-\r
- cl_spinlock_acquire( &p_context->cb_lock );\r
- for( p_list_item = cl_qlist_remove_head( p_cblist );\r
- p_list_item != cl_qlist_end( p_cblist );\r
- p_list_item = cl_qlist_remove_head( p_cblist ) )\r
- {\r
- p_cb_info = (al_proxy_cb_info_t*)p_list_item;\r
- if( p_cb_info->p_al_obj )\r
- deref_al_obj( p_cb_info->p_al_obj );\r
- proxy_cb_put( p_cb_info );\r
- }\r
- cl_spinlock_release( &p_context->cb_lock );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Remove all queued callbacks from all callback lists.\r
- */\r
-static void\r
-__proxy_cancel_cblists(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- __proxy_dq_cblist( p_context, &p_context->cm_cb_list );\r
- __proxy_dq_cblist( p_context, &p_context->comp_cb_list );\r
- __proxy_dq_cblist( p_context, &p_context->misc_cb_list );\r
-}\r
-\r
-\r
-cl_status_t\r
-al_dev_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- cl_status_t cl_status;\r
- size_t ret_bytes = 0;\r
- void *p_open_context;\r
- IO_STACK_LOCATION *p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_open_context = p_io_stack->FileObject->FsContext;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV,\r
- ("al_dev_ioctl: buf_size (%d) p_buf (%016I64x).\n",\r
- cl_ioctl_in_size( h_ioctl ), (LONG_PTR)cl_ioctl_in_buf( h_ioctl )) );\r
-\r
- /* Process the ioctl command. */\r
- if( IS_AL_PROXY_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = proxy_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = verbs_ioctl( h_ioctl, &ret_bytes );\r
- //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- // cl_status = cm_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = cep_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = al_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = subnet_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_IOC_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = ioc_ioctl( h_ioctl, &ret_bytes );\r
- else if( IS_NDI_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )\r
- cl_status = ndi_ioctl( h_ioctl, &ret_bytes );\r
- else\r
- cl_status = CL_INVALID_REQUEST;\r
-\r
- switch( cl_status )\r
- {\r
- case CL_COMPLETED:\r
- /* Flip the status since the IOCTL was completed. */\r
- cl_status = CL_SUCCESS;\r
- /* Fall through */\r
- case CL_PENDING:\r
- break;\r
- case CL_INVALID_REQUEST:\r
- /*\r
- * In Windows, Driver Verifier sends bogus IOCTLs to the device.\r
- * These must be passed down the device stack, and so cannot be\r
- * completed in the IOCTL handler. They are properly cleaned up,\r
- * though no data is returned to the user.\r
- */\r
- break;\r
- default:\r
- cl_ioctl_complete( h_ioctl, cl_status, ret_bytes );\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Cancel any pending IOCTL calls for the specified type.\r
- * This routine is also called when closing the device.\r
- */\r
-void\r
-al_dev_cancel_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- al_dev_open_context_t *p_context;\r
- cl_ioctl_handle_t *ph_ioctl;\r
- PIO_STACK_LOCATION p_io_stack;\r
-\r
- /*\r
- * Search the ioctl buffer in the process specific queue\r
- * Dequeue it, if found\r
- */\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- /* Get the stack location. */\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
-\r
- p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-\r
- /* Clear the IOCTL. */\r
- cl_spinlock_acquire( &p_context->cb_lock );\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_GET_CM_CB_INFO:\r
- ph_ioctl = &p_context->h_cm_ioctl;\r
- break;\r
- case UAL_GET_COMP_CB_INFO:\r
- ph_ioctl = &p_context->h_comp_ioctl;\r
- break;\r
- case UAL_GET_MISC_CB_INFO:\r
- ph_ioctl = &p_context->h_misc_ioctl;\r
- break;\r
- default:\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid CB type\n") );\r
- ph_ioctl = NULL;\r
- break;\r
- }\r
-\r
- if( ph_ioctl && *ph_ioctl == h_ioctl )\r
- {\r
- *ph_ioctl = NULL;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, NULL );\r
-#pragma warning(pop)\r
-\r
- /* Complete the IOCTL. */\r
- cl_ioctl_complete( h_ioctl, CL_CANCELED, 0 );\r
- proxy_context_deref( p_context );\r
- }\r
- cl_spinlock_release( &p_context->cb_lock );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
-}\r
-\r
-\r
-void\r
-al_dev_cancel_io(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- al_dev_cancel_ioctl( p_irp );\r
-\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2008 Intel Corporation. All rights reserved.\r
- * Copyright (c) 2006 Mellanox Technologies. All rights reserved.\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-/*\r
- * Driver entry points for the InfiniBand Access Library Filter Driver.\r
- */\r
-\r
-#include <complib/cl_types.h>\r
-#include "al_mgr.h"\r
-#include "al_dev.h"\r
-#include "al_debug.h"\r
-#include <complib/cl_init.h>\r
-#include "al_init.h"\r
-#include "al_driver.h"\r
-\r
-#include <initguid.h>\r
-#include "iba/ib_al_ifc.h"\r
-#include "iba/ib_ci_ifc.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#endif\r
-\r
-uint32_t g_al_dbg_level;\r
-uint32_t g_al_dbg_flags;\r
-\r
-al_globals_t al_globals;\r
-\r
-#define DEFAULT_NODE_DESC "OpenIB Windows® Host"\r
-\r
-char node_desc[IB_NODE_DESCRIPTION_SIZE];\r
-\r
-static void\r
-__read_machine_name( void );\r
-\r
-static NTSTATUS\r
-__read_registry(\r
- IN UNICODE_STRING* const p_Param_Path );\r
-\r
-static NTSTATUS\r
-al_drv_open(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-\r
-static NTSTATUS\r
-al_drv_cleanup(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-\r
-static NTSTATUS\r
-al_drv_close(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-\r
-static NTSTATUS\r
-al_drv_ioctl(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-\r
-\r
-NTSTATUS\r
-al_drv_add_device(\r
- IN DRIVER_OBJECT *p_driver_obj,\r
- IN DEVICE_OBJECT *p_pdo );\r
-\r
-static NTSTATUS\r
-fdo_start(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action );\r
-\r
-static NTSTATUS\r
-fdo_query_capabilities(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action );\r
-\r
-\r
-static NTSTATUS\r
-fdo_query_remove(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action );\r
-\r
-static void\r
-fdo_release_resources(\r
- IN DEVICE_OBJECT* const p_dev_obj );\r
-\r
-static NTSTATUS\r
-fdo_query_interface(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action );\r
-\r
-\r
-/***f* InfiniBand Access Library (IBAL) Driver/al_drv_sysctl\r
-* NAME\r
-* al_drv_sysctl\r
-*\r
-* DESCRIPTION\r
-* Entry point for handling WMI IRPs.\r
-*\r
-* SYNOPSIS\r
-*/\r
-static NTSTATUS\r
-al_drv_sysctl(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-/**********/\r
-\r
-static NTSTATUS\r
-al_drv_ignore_power(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp );\r
-\r
-static void\r
-al_drv_unload(\r
- IN DRIVER_OBJECT *p_driver_obj );\r
-\r
-NTSTATUS\r
-DriverEntry(\r
- IN DRIVER_OBJECT *p_driver_obj,\r
- IN UNICODE_STRING *p_registry_path );\r
-\r
-\r
-#ifdef ALLOC_PRAGMA\r
-#pragma alloc_text (INIT, DriverEntry)\r
-#pragma alloc_text (INIT, __read_machine_name)\r
-#pragma alloc_text (INIT, __read_registry)\r
-#pragma alloc_text (PAGE, al_drv_unload)\r
-#pragma alloc_text (PAGE, al_drv_add_device)\r
-#pragma alloc_text (PAGE, al_drv_open)\r
-#pragma alloc_text (PAGE, al_drv_close)\r
-#pragma alloc_text (PAGE, al_drv_ioctl)\r
-#pragma alloc_text (PAGE_PNP, al_drv_ignore_power)\r
-#pragma alloc_text (PAGE_PNP, al_drv_sysctl)\r
-#endif\r
-\r
-\r
-static void\r
-__read_machine_name( void )\r
-{\r
- NTSTATUS status;\r
- /* Remember the terminating entry in the table below. */\r
- RTL_QUERY_REGISTRY_TABLE table[2];\r
- UNICODE_STRING hostNamePath;\r
- UNICODE_STRING hostNameW;\r
- ANSI_STRING hostName;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- /* Get the host name. */\r
- RtlInitUnicodeString( &hostNamePath, L"ComputerName\\ComputerName" );\r
- RtlInitUnicodeString( &hostNameW, NULL );\r
-\r
- /*\r
- * Clear the table. This clears all the query callback pointers,\r
- * and sets up the terminating table entry.\r
- */\r
- cl_memclr( table, sizeof(table) );\r
- cl_memclr( node_desc, sizeof(node_desc) );\r
-\r
- /* Setup the table entries. */\r
- table[0].Flags = RTL_QUERY_REGISTRY_DIRECT | RTL_QUERY_REGISTRY_REQUIRED;\r
- table[0].Name = L"ComputerName";\r
- table[0].EntryContext = &hostNameW;\r
- table[0].DefaultType = REG_SZ;\r
- table[0].DefaultData = &hostNameW;\r
- table[0].DefaultLength = 0;\r
-\r
- /* Have at it! */\r
- status = RtlQueryRegistryValues( RTL_REGISTRY_CONTROL, \r
- hostNamePath.Buffer, table, NULL, NULL );\r
- if( NT_SUCCESS( status ) )\r
- {\r
- /* Convert the UNICODE host name to UTF-8 (ASCII). */\r
- hostName.Length = 0;\r
- hostName.MaximumLength = sizeof(node_desc);\r
- hostName.Buffer = node_desc;\r
- status = RtlUnicodeStringToAnsiString( &hostName, &hostNameW, FALSE );\r
- RtlFreeUnicodeString( &hostNameW );\r
- }\r
- else\r
- {\r
- AL_TRACE(AL_DBG_ERROR , ("Failed to get host name.\n") );\r
- /* Use the default name... */\r
- RtlStringCbCopyNA( node_desc, sizeof(node_desc),\r
- DEFAULT_NODE_DESC, sizeof(DEFAULT_NODE_DESC) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__read_registry(\r
- IN UNICODE_STRING* const p_registry_path )\r
-{\r
- NTSTATUS status;\r
- /* Remember the terminating entry in the table below. */\r
- RTL_QUERY_REGISTRY_TABLE table[7];\r
- UNICODE_STRING param_path;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- __read_machine_name();\r
-\r
- RtlInitUnicodeString( ¶m_path, NULL );\r
- param_path.MaximumLength = p_registry_path->Length + \r
- sizeof(L"\\Parameters");\r
- param_path.Buffer = cl_zalloc( param_path.MaximumLength );\r
- if( !param_path.Buffer )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("Failed to allocate parameters path buffer.\n") );\r
- return STATUS_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- RtlAppendUnicodeStringToString( ¶m_path, p_registry_path );\r
- RtlAppendUnicodeToString( ¶m_path, L"\\Parameters" );\r
-\r
- /*\r
- * Clear the table. This clears all the query callback pointers,\r
- * and sets up the terminating table entry.\r
- */\r
- cl_memclr( table, sizeof(table) );\r
-\r
- /* Setup the table entries. */\r
-\r
- table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[0].Name = L"IbalDebugLevel";\r
- table[0].EntryContext = &g_al_dbg_level;\r
- table[0].DefaultType = REG_DWORD;\r
- table[0].DefaultData = &g_al_dbg_level;\r
- table[0].DefaultLength = sizeof(ULONG);\r
-\r
- table[1].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[1].Name = L"IbalDebugFlags";\r
- table[1].EntryContext = &g_al_dbg_flags;\r
- table[1].DefaultType = REG_DWORD;\r
- table[1].DefaultData = &g_al_dbg_flags;\r
- table[1].DefaultLength = sizeof(ULONG);\r
-\r
- table[2].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[2].Name = L"SmiPollInterval";\r
- table[2].EntryContext = &g_smi_poll_interval;\r
- table[2].DefaultType = REG_DWORD;\r
- table[2].DefaultData = &g_smi_poll_interval;\r
- table[2].DefaultLength = sizeof(ULONG);\r
-\r
- table[3].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[3].Name = L"IocQueryTimeout";\r
- table[3].EntryContext = &g_ioc_query_timeout;\r
- table[3].DefaultType = REG_DWORD;\r
- table[3].DefaultData = &g_ioc_query_timeout;\r
- table[3].DefaultLength = sizeof(ULONG);\r
-\r
- table[4].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[4].Name = L"IocQueryRetries";\r
- table[4].EntryContext = &g_ioc_query_retries;\r
- table[4].DefaultType = REG_DWORD;\r
- table[4].DefaultData = &g_ioc_query_retries;\r
- table[4].DefaultLength = sizeof(ULONG);\r
-\r
- table[5].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
- table[5].Name = L"IocPollInterval";\r
- table[5].EntryContext = &g_ioc_poll_interval;\r
- table[5].DefaultType = REG_DWORD;\r
- table[5].DefaultData = &g_ioc_poll_interval;\r
- table[5].DefaultLength = sizeof(ULONG);\r
-\r
- /* Have at it! */\r
- status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
- param_path.Buffer, table, NULL, NULL );\r
-\r
-#if DBG\r
-// XXX STAN\r
- g_al_dbg_flags = (AL_DBG_PNP | AL_DBG_ERR | AL_DBG_DRV | AL_DBG_QUERY);\r
- g_al_dbg_level = TRACE_LEVEL_VERBOSE;\r
-\r
- if( g_al_dbg_flags & AL_DBG_ERR )\r
- g_al_dbg_flags |= CL_DBG_ERROR;\r
-#endif\r
-\r
- AL_TRACE(AL_DBG_DRV ,\r
- ("debug level %d debug flags 0x%.8x\n",\r
- g_al_dbg_level,\r
- g_al_dbg_flags));\r
-\r
- cl_free( param_path.Buffer );\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_open(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL );\r
-\r
- /* We always succeed file handles creation. */\r
- p_irp->IoStatus.Status = STATUS_SUCCESS;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_cleanup(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- NTSTATUS status;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- CL_ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL );\r
-\r
- /*\r
- * Note that we don't acquire the remove and stop lock on close to allow\r
- * applications to close the device when the locks are already held.\r
- */\r
- status = cl_to_ntstatus( al_dev_close( p_irp ) );\r
-\r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = status;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_close(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- p_irp->IoStatus.Status = STATUS_SUCCESS;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
-\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_ioctl(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- NTSTATUS status;\r
- al_fdo_ext_t *p_ext;\r
- PIO_STACK_LOCATION p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- /* Get the extension. */\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- /* Get the stack location. */\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- /* Acquire the stop lock. */\r
- status = IoAcquireRemoveLock( &p_ext->cl_ext.stop_lock, p_irp );\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- p_irp->IoStatus.Status = status;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
- }\r
-\r
- /* Acquire the remove lock. */\r
- status = IoAcquireRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- IoReleaseRemoveLock( &p_ext->cl_ext.stop_lock, p_irp );\r
- p_irp->IoStatus.Status = status;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
- }\r
- \r
- status = cl_to_ntstatus( al_dev_ioctl( p_irp ) );\r
- \r
- /* Only pass down if not handled and not PDO device. */\r
- if( status == STATUS_INVALID_DEVICE_REQUEST && p_ext->cl_ext.p_next_do )\r
- {\r
- IoSkipCurrentIrpStackLocation( p_irp );\r
- status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
- }\r
-\r
- /* Release the remove and stop locks. */\r
- IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
- IoReleaseRemoveLock( &p_ext->cl_ext.stop_lock, p_irp );\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_sysctl(\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- NTSTATUS status;\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- CL_ASSERT( p_dev_obj );\r
- CL_ASSERT( p_irp );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- if( p_ext->cl_ext.p_next_do )\r
- {\r
- IoSkipCurrentIrpStackLocation( p_irp );\r
- status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
- }\r
- else\r
- {\r
- status = p_irp->IoStatus.Status;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- }\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-al_drv_ignore_power (\r
- IN DEVICE_OBJECT *p_dev_obj,\r
- IN IRP *p_irp )\r
-{\r
- NTSTATUS status;\r
- cl_pnp_po_ext_t *p_ext;\r
- IO_STACK_LOCATION *p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- ASSERT( p_dev_obj );\r
- ASSERT( p_irp );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- // XXX Debug for now\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
- ASSERT( p_io_stack->MajorFunction == IRP_MJ_POWER );\r
-\r
- KdPrint((" Ignore IRP_MJ_POWER Minor Func 0x%x\n",\r
- p_io_stack->MinorFunction));\r
-\r
- // IrpIgnore\r
- PoStartNextPowerIrp( p_irp );\r
- IoSkipCurrentIrpStackLocation( p_irp );\r
- status = PoCallDriver( p_ext->p_next_do, p_irp );\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return status;\r
-}\r
-\r
-\r
-static void\r
-al_drv_unload(\r
- IN DRIVER_OBJECT *p_driver_obj )\r
-{\r
- UNUSED_PARAM( p_driver_obj );\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
- CL_DEINIT;\r
-\r
-#if defined(EVENT_TRACING)\r
- WPP_CLEANUP(p_driver_obj);\r
-#endif\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
-}\r
-\r
-\r
-NTSTATUS\r
-DriverEntry(\r
- IN DRIVER_OBJECT *p_driver_obj,\r
- IN UNICODE_STRING *p_registry_path )\r
-{\r
- NTSTATUS status;\r
- ib_api_status_t ib_status;\r
-\r
- AL_ENTER( AL_DBG_DRV );\r
-\r
-#if defined(EVENT_TRACING)\r
- WPP_INIT_TRACING(p_driver_obj ,p_registry_path);\r
-#endif\r
-\r
- status = CL_INIT;\r
- if( !NT_SUCCESS(status) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("cl_init returned %08X.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Store the driver object pointer in the global parameters. */\r
- al_globals.p_driver_obj = p_driver_obj;\r
-\r
- /* Get the registry values. */\r
- status = __read_registry( p_registry_path );\r
- if( !NT_SUCCESS(status) )\r
- {\r
- CL_DEINIT;\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("__read_registry returned %08x.\n", status) );\r
- return status;\r
- }\r
-\r
- /* Setup the entry points. */\r
- p_driver_obj->MajorFunction[IRP_MJ_CREATE] = al_drv_open;\r
- p_driver_obj->MajorFunction[IRP_MJ_CLEANUP] = al_drv_cleanup;\r
- p_driver_obj->MajorFunction[IRP_MJ_CLOSE] = al_drv_close;\r
- p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp;\r
- p_driver_obj->MajorFunction[IRP_MJ_POWER] = al_drv_ignore_power/*cl_power*/;\r
- p_driver_obj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = al_drv_ioctl;\r
- p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = al_drv_sysctl;\r
- p_driver_obj->DriverUnload = al_drv_unload;\r
- p_driver_obj->DriverExtension->AddDevice = al_drv_add_device;\r
-\r
- /* Initialize AL */\r
- ib_status = al_initialize();\r
- if( ib_status != IB_SUCCESS )\r
- {\r
- al_cleanup();\r
- AL_TRACE_EXIT( AL_DBG_ERROR, ("al_initialize() returned %s.\n",\r
- ib_get_err_str(ib_status)) );\r
- return STATUS_UNSUCCESSFUL;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DRV );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/* virtual function pointer tables shared between all instances of FDO. */\r
-static const cl_vfptr_pnp_po_t vfptr_fdo_pnp = {\r
- "IBAL",\r
- fdo_start,\r
- cl_irp_skip,\r
- cl_irp_skip,\r
- cl_do_sync_pnp,\r
- fdo_query_remove,\r
- fdo_release_resources,\r
- cl_do_remove,\r
- cl_do_sync_pnp,\r
- cl_irp_skip,\r
- fdo_query_capabilities,\r
- cl_irp_skip,\r
- cl_irp_skip,\r
- cl_do_sync_pnp,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore /*fdo_query_remove_relations*/,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- fdo_query_interface, /* QueryInterface */\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore,\r
- cl_irp_ignore, /* QueryPower */\r
- cl_irp_ignore, /* SetPower */\r
- cl_irp_ignore, /* PowerSequence */\r
- cl_irp_ignore /* WaitWake */\r
-};\r
-\r
-\r
-/*\r
- * called from add_device to acquire an HCA's verb interfaces\r
- */\r
-\r
-static NTSTATUS\r
-get_hca_ifc( \r
- IN DEVICE_OBJECT *p_pdo,\r
- IN OUT RDMA_INTERFACE_VERBS **hca_ifc )\r
-{\r
- NTSTATUS status;\r
- IO_STACK_LOCATION io_stack;\r
- RDMA_INTERFACE_VERBS *ifc;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- *hca_ifc = NULL;\r
-\r
- ifc = ExAllocatePoolWithTag( PagedPool,\r
- sizeof(RDMA_INTERFACE_VERBS),\r
- 'iach' );\r
- if ( !ifc )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("Failed to allocate hca_ifc (%d bytes).\n",\r
- sizeof(RDMA_INTERFACE_VERBS)) );\r
- return STATUS_INSUFFICIENT_RESOURCES;\r
- }\r
- ifc->Verbs.p_hca_dev = NULL;\r
-\r
- // Query for InfiniBand HCA Interfaces (ifc) \r
-\r
- io_stack.MinorFunction = IRP_MN_QUERY_INTERFACE;\r
- io_stack.Parameters.QueryInterface.Version = VerbsVersion(2,0);\r
- io_stack.Parameters.QueryInterface.Size = sizeof(RDMA_INTERFACE_VERBS);\r
- io_stack.Parameters.QueryInterface.Interface =(INTERFACE*)ifc;\r
- io_stack.Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
- io_stack.Parameters.QueryInterface.InterfaceType =\r
- &GUID_RDMA_INTERFACE_VERBS;\r
-\r
- status = cl_fwd_query_ifc( p_pdo, &io_stack );\r
-\r
- if( NT_SUCCESS( status ) && ifc->Verbs.p_hca_dev )\r
- {\r
- *hca_ifc = ifc;\r
- }\r
- else {\r
- if ( NT_SUCCESS( status ) )\r
- status = STATUS_UNSUCCESSFUL;\r
- ExFreePool( ifc );\r
- }\r
- AL_EXIT( AL_DBG_PNP );\r
-\r
- return status;\r
-}\r
-\r
-void\r
-release_hca_ifc( RDMA_INTERFACE_VERBS *ifc )\r
-{\r
- if ( !ifc )\r
- return;\r
- /*\r
- * Dereference the interface now so that the driver doesn't fail a\r
- * query remove IRP. \r
- */\r
- ifc->InterfaceHeader.InterfaceDereference( ifc->InterfaceHeader.Context );\r
- ExFreePool( ifc );\r
-}\r
-\r
-\r
-NTSTATUS\r
-al_drv_add_device(\r
- IN DRIVER_OBJECT *p_driver_obj,\r
- IN DEVICE_OBJECT *p_pdo )\r
-{\r
- NTSTATUS status;\r
- DEVICE_OBJECT *p_dev_obj, *p_next_do;\r
- al_fdo_ext_t *p_ext;\r
- UNICODE_STRING dev_name, dos_name;\r
- RDMA_INTERFACE_VERBS *hca_ifc;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( al_globals.p_al_ext )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("IBAL root already exists. Only one IBAL root allowed.\n") );\r
- return STATUS_NO_SUCH_DEVICE;\r
- }\r
-\r
- RtlInitUnicodeString( &dev_name, AL_DEVICE_NAME );\r
- RtlInitUnicodeString( &dos_name, L"\\DosDevices\\Global\\ibal" );\r
-\r
- /* Create the FDO device object to attach to the stack. */\r
- status = IoCreateDevice( p_driver_obj,\r
- sizeof(al_fdo_ext_t),\r
- &dev_name,\r
- FILE_DEVICE_INFINIBAND,\r
- FILE_DEVICE_SECURE_OPEN,\r
- FALSE,\r
- &p_dev_obj );\r
- if( !NT_SUCCESS(status) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("Failed to create IBAL root FDO device.\n") );\r
- return status;\r
- }\r
-\r
- IoDeleteSymbolicLink( &dos_name );\r
- status = IoCreateSymbolicLink( &dos_name, &dev_name );\r
- if( !NT_SUCCESS(status) )\r
- {\r
- IoDeleteDevice( p_dev_obj );\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("Failed to create symlink for dos name.\n") );\r
- return status;\r
- }\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo );\r
- if( !p_next_do )\r
- {\r
- IoDeleteDevice( p_dev_obj );\r
- AL_TRACE_EXIT( AL_DBG_ERROR, ("IoAttachToDeviceStack failed.\n") );\r
- return STATUS_NO_SUCH_DEVICE;\r
- }\r
-\r
- cl_init_pnp_po_ext( p_dev_obj,\r
- p_next_do,\r
- p_pdo,\r
- g_al_dbg_level,\r
- &vfptr_fdo_pnp,\r
- NULL );\r
-\r
- /* Register the upper interface (the one used by ibal kernel clients) */\r
- status = IoRegisterDeviceInterface( p_pdo, \r
- &GUID_IB_AL_INTERFACE,\r
- NULL,\r
- &p_ext->al_ifc_name );\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- IoDetachDevice( p_ext->cl_ext.p_next_do );\r
- IoDeleteDevice( p_dev_obj );\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("IoRegisterDeviceInterface for upper interface returned %08x\n",\r
- status) );\r
- return STATUS_NO_SUCH_DEVICE;\r
- }\r
-\r
-#if TMP_CI_IFC // temp debug - register ibal lower edge CI interface\r
-\r
- // __get_relations() not supported as HCA,ibal & bus all in same device\r
- // stack so remove dependencies are done for us; hence get_relations\r
- // can be skipped.\r
-\r
-DbgPrint("%s() IoRegister CI intf\n",__FUNCTION__);\r
- /* Register the lower (CI) interface (the one used by HCA VPDs). */\r
- status = IoRegisterDeviceInterface ( p_pdo, \r
- &GUID_IB_CI_INTERFACE,\r
- NULL,\r
- &p_ext->ci_ifc_name );\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- RtlFreeUnicodeString( &p_ext->al_ifc_name );\r
- IoDetachDevice( p_ext->cl_ext.p_next_do );\r
- IoDeleteDevice( p_dev_obj );\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("IoRegisterDeviceInterface: lower (CI) interface returned %08x\n",\r
- status) );\r
- return STATUS_NO_SUCH_DEVICE;\r
- }\r
-#endif\r
-\r
-#if 1\r
- /* query for the HCA's interfaces - eventually the above CI ifc register\r
- * will go away in favor of this query. So say we all.\r
- */\r
- status = get_hca_ifc ( p_pdo, &hca_ifc );\r
- if ( NT_SUCCESS(status) ) {\r
- KdPrint(("%s() get_hca_if OK, releasing IFC\n",__FUNCTION__));\r
- release_hca_ifc( hca_ifc );\r
- }\r
- else {\r
- KdPrint(("%s() get_hca_if() failed? status 0x%x\n",__FUNCTION__,status));\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("unable to get HCA interface? status 0x%x\n",status));\r
- }\r
-#if 0\r
- if ( NT_SUCCESS(status) ) {\r
- status = ib_register_ca( &hca_ifc.Verbs );\r
- if( !NT_SUCCESS( status ) )\r
- release_hca_ifc( hca_ifc );\r
- }\r
-#endif\r
-#endif\r
- al_globals.p_al_ext = (al_fdo_ext_t*)p_ext;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static NTSTATUS\r
-fdo_start(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action )\r
-{\r
- NTSTATUS status;\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- /* Handled on the way up. */\r
- status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("Lower drivers failed IRP_MN_START_DEVICE.\n") );\r
- return status;\r
- }\r
-\r
- // enable IBAL client interface\r
- status = IoSetDeviceInterfaceState( &p_ext->al_ifc_name, TRUE );\r
- ASSERT( NT_SUCCESS( status ) );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-fdo_query_remove( IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action )\r
-{\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- if( p_ext->n_al_ifc_ref )\r
- {\r
- /*\r
- * Our interface is still being held by someone.\r
- * Rollback the PnP state that was changed in the cl_ext handler.\r
- */\r
- cl_rollback_pnp_state( &p_ext->cl_ext );\r
-\r
- /* Fail the query. */\r
- *p_action = IrpComplete;\r
- AL_TRACE_EXIT( AL_DBG_PNP, \r
- ("Failing IRP_MN_QUERY_REMOVE_DEVICE:\n"\r
- "\tAL Interface has %d references\n", p_ext->n_al_ifc_ref ) );\r
- return STATUS_UNSUCCESSFUL;\r
- }\r
-\r
- *p_action = IrpSkip;\r
- /* The FDO driver must set the status even when passing down. */\r
- p_irp->IoStatus.Status = STATUS_SUCCESS;\r
- AL_EXIT( AL_DBG_PNP );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * This function gets called after releasing the remove lock and waiting\r
- * for all other threads to release the lock. No more modifications will\r
- * occur to the PDO pointer vectors.\r
- */\r
-static void\r
-fdo_release_resources( IN DEVICE_OBJECT* const p_dev_obj )\r
-{\r
- al_fdo_ext_t *p_ext;\r
- NTSTATUS status;\r
- UNICODE_STRING dos_name;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- //TODO: Fail outstanding I/O operations.\r
-\r
- /* Disable any exported interfaces. */\r
- status = IoSetDeviceInterfaceState( &p_ext->al_ifc_name, FALSE );\r
- ASSERT( NT_SUCCESS( status ) );\r
-\r
- RtlFreeUnicodeString( &p_ext->al_ifc_name );\r
-\r
- al_cleanup();\r
-\r
- if ( al_globals.p_al_ext ) {\r
- RtlInitUnicodeString( &dos_name, L"\\DosDevices\\Global\\ibal" );\r
- IoDeleteSymbolicLink( &dos_name );\r
- }\r
-\r
- al_globals.p_al_ext = NULL;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static NTSTATUS\r
-fdo_query_capabilities(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action )\r
-{\r
- NTSTATUS status;\r
- al_fdo_ext_t *p_ext;\r
- IO_STACK_LOCATION *p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- /* Process on the way up. */\r
- status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
-\r
- if( !NT_SUCCESS( status ) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_ERROR, \r
- ("cl_do_sync_pnp returned %08x.\n", status) );\r
- return status;\r
- }\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- /*\r
- * Store the device power maping into our extension since we're\r
- * the power policy owner. The mapping is used when handling\r
- * IRP_MN_SET_POWER IRPs.\r
- */\r
- cl_memcpy( p_ext->po_state, \r
- p_io_stack->Parameters.DeviceCapabilities.Capabilities->DeviceState,\r
- sizeof( p_ext->po_state ) );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
-#if NOT_USED // XXX\r
-\r
-static NTSTATUS\r
-fdo_query_remove_relations(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action )\r
-{\r
- NTSTATUS status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- status = port_mgr_get_bus_relations( 0, p_irp );\r
- if( status == STATUS_SUCCESS || \r
- status == STATUS_NO_SUCH_DEVICE )\r
- {\r
- status = iou_mgr_get_bus_relations( 0, p_irp );\r
- }\r
- if( status == STATUS_NO_SUCH_DEVICE )\r
- status = STATUS_SUCCESS;\r
-\r
- switch( status )\r
- {\r
- case STATUS_NO_SUCH_DEVICE:\r
- *p_action = IrpSkip;\r
- status = STATUS_SUCCESS;\r
- break;\r
-\r
- case STATUS_SUCCESS:\r
- *p_action = IrpPassDown;\r
- break;\r
-\r
- default:\r
- *p_action = IrpComplete;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-#endif\r
-\r
-\r
-void\r
-al_ref_ifc(\r
- IN DEVICE_OBJECT* p_dev_obj )\r
-{\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- cl_atomic_inc( &p_ext->n_al_ifc_ref );\r
- ObReferenceObject( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-void\r
-al_deref_ifc(\r
- IN DEVICE_OBJECT* p_dev_obj )\r
-{\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- cl_atomic_dec( &p_ext->n_al_ifc_ref );\r
- ObDereferenceObject( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-al_set_ifc(\r
- OUT ib_al_ifc_t* const p_ifc )\r
-{\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ifc->wdm.Size = sizeof(ib_al_ifc_t);\r
- p_ifc->wdm.InterfaceReference = al_ref_ifc;\r
- p_ifc->wdm.InterfaceDereference = al_deref_ifc;\r
-\r
- p_ifc->sync_destroy = ib_sync_destroy;\r
- p_ifc->open_ca = ib_open_ca;\r
- p_ifc->query_ca = ib_query_ca;\r
- p_ifc->get_dev = get_ca_dev;\r
- p_ifc->close_ca = ib_close_ca;\r
- p_ifc->alloc_pd = ib_alloc_pd;\r
- p_ifc->dealloc_pd = ib_dealloc_pd;\r
- p_ifc->create_av = ib_create_av;\r
- p_ifc->query_av = ib_query_av;\r
- p_ifc->modify_av = ib_modify_av;\r
- p_ifc->destroy_av = ib_destroy_av;\r
- p_ifc->create_qp = ib_create_qp;\r
- p_ifc->get_spl_qp = ib_get_spl_qp;\r
- p_ifc->query_qp = ib_query_qp;\r
- p_ifc->modify_qp = ib_modify_qp;\r
- p_ifc->destroy_qp = ib_destroy_qp;\r
- p_ifc->create_cq = ib_create_cq;\r
- p_ifc->modify_cq = ib_modify_cq;\r
- p_ifc->query_cq = ib_query_cq;\r
- p_ifc->destroy_cq = ib_destroy_cq;\r
- p_ifc->reg_mem = ib_reg_mem;\r
- p_ifc->reg_phys = ib_reg_phys;\r
- p_ifc->query_mr = ib_query_mr;\r
- p_ifc->rereg_mem = ib_rereg_mem;\r
- p_ifc->reg_shmid = ib_reg_shmid;\r
- p_ifc->dereg_mr = ib_dereg_mr;\r
- p_ifc->create_mw = ib_create_mw;\r
- p_ifc->query_mw = ib_query_mw;\r
- p_ifc->bind_mw = ib_bind_mw;\r
- p_ifc->destroy_mw = ib_destroy_mw;\r
- p_ifc->post_send = ib_post_send;\r
- p_ifc->post_recv = ib_post_recv;\r
- p_ifc->send_mad = ib_send_mad;\r
- p_ifc->cancel_mad = ib_cancel_mad;\r
- p_ifc->poll_cq = ib_poll_cq;\r
- p_ifc->rearm_cq = ib_rearm_cq;\r
- p_ifc->join_mcast = ib_join_mcast;\r
- p_ifc->leave_mcast = ib_leave_mcast;\r
- p_ifc->local_mad = ib_local_mad;\r
- p_ifc->cm_listen = ib_cm_listen;\r
- p_ifc->cm_cancel = ib_cm_cancel;\r
- p_ifc->cm_req = ib_cm_req;\r
- p_ifc->cm_rep = ib_cm_rep;\r
- p_ifc->cm_rtu = ib_cm_rtu;\r
- p_ifc->cm_rej = ib_cm_rej;\r
- p_ifc->cm_mra = ib_cm_mra;\r
- p_ifc->cm_lap = ib_cm_lap;\r
- p_ifc->cm_apr = ib_cm_apr;\r
- p_ifc->force_apm = ib_force_apm;\r
- p_ifc->cm_dreq = ib_cm_dreq;\r
- p_ifc->cm_drep = ib_cm_drep;\r
- p_ifc->cm_handoff = ib_cm_handoff;\r
- p_ifc->create_ioc = ib_create_ioc;\r
- p_ifc->destroy_ioc = ib_destroy_ioc;\r
- p_ifc->reg_ioc = ib_reg_ioc;\r
- p_ifc->add_svc_entry = ib_add_svc_entry;\r
- p_ifc->remove_svc_entry = ib_remove_svc_entry;\r
- p_ifc->get_ca_guids = ib_get_ca_guids;\r
- p_ifc->get_ca_by_gid = ib_get_ca_by_gid;\r
- p_ifc->get_port_by_gid = ib_get_port_by_gid;\r
- p_ifc->create_mad_pool = ib_create_mad_pool;\r
- p_ifc->destroy_mad_pool = ib_destroy_mad_pool;\r
- p_ifc->reg_mad_pool = ib_reg_mad_pool;\r
- p_ifc->dereg_mad_pool = ib_dereg_mad_pool;\r
- p_ifc->get_mad = ib_get_mad;\r
- p_ifc->put_mad = ib_put_mad;\r
- p_ifc->init_dgrm_svc = ib_init_dgrm_svc;\r
- p_ifc->reg_mad_svc = ib_reg_mad_svc;\r
- p_ifc->reg_svc = ib_reg_svc;\r
- p_ifc->dereg_svc = ib_dereg_svc;\r
- p_ifc->query = ib_query;\r
- p_ifc->cancel_query = ib_cancel_query;\r
- p_ifc->reg_pnp = ib_reg_pnp;\r
- p_ifc->dereg_pnp = ib_dereg_pnp;\r
- p_ifc->subscribe = ib_subscribe;\r
- p_ifc->unsubscribe = ib_unsubscribe;\r
- p_ifc->reject_ioc = ib_reject_ioc;\r
- p_ifc->ci_call = ib_ci_call;\r
- p_ifc->open_al = ib_open_al;\r
- p_ifc->close_al = ib_close_al;\r
- p_ifc->get_err_str = ib_get_err_str;\r
- p_ifc->get_wc_status_str = ib_get_wc_status_str;\r
- p_ifc->create_mlnx_fmr = mlnx_create_fmr;\r
- p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr;\r
- p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr;\r
- p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr;\r
- p_ifc->create_mlnx_fmr_pool = mlnx_create_fmr_pool;\r
- p_ifc->destroy_mlnx_fmr_pool = mlnx_destroy_fmr_pool;\r
- p_ifc->map_phys_mlnx_fmr_pool = mlnx_map_phys_fmr_pool;\r
- p_ifc->unmap_mlnx_fmr_pool = mlnx_unmap_fmr_pool;\r
- p_ifc->flush_mlnx_fmr_pool = mlnx_flush_fmr_pool;\r
- p_ifc->create_srq = ib_create_srq;\r
- p_ifc->modify_srq = ib_modify_srq;\r
- p_ifc->query_srq = ib_query_srq;\r
- p_ifc->destroy_srq = ib_destroy_srq;\r
- p_ifc->post_srq_recv = ib_post_srq_recv;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-static NTSTATUS\r
-__query_al_ifc(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IO_STACK_LOCATION* const p_io_stack )\r
-{\r
- ib_al_ifc_t *p_ifc;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( p_io_stack->Parameters.QueryInterface.Version != AL_INTERFACE_VERSION )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_PNP, ("Incorrect AL interface version (%d)\n",\r
- p_io_stack->Parameters.QueryInterface.Version ) );\r
- return STATUS_NOT_SUPPORTED;\r
- }\r
-\r
- if( p_io_stack->Parameters.QueryInterface.Size < sizeof(ib_al_ifc_t) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_PNP, \r
- ("AL IFC Buffer too small (%d given, %d required).\n",\r
- p_io_stack->Parameters.QueryInterface.Size, sizeof(ib_al_ifc_t)) );\r
- return STATUS_BUFFER_TOO_SMALL;\r
- }\r
-\r
- // Copy the interface.\r
- p_ifc = (ib_al_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface;\r
-\r
- p_ifc->wdm.Size = sizeof(ib_al_ifc_t);\r
- p_ifc->wdm.Version = AL_INTERFACE_VERSION;\r
- p_ifc->wdm.Context = p_dev_obj;\r
- p_ifc->wdm.InterfaceReference = al_ref_ifc;\r
- p_ifc->wdm.InterfaceDereference = al_deref_ifc;\r
-\r
- al_set_ifc( p_ifc );\r
-\r
- // take the reference before returning.\r
- al_ref_ifc( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-#if TMP_CI_IFC\r
-\r
-static void\r
-al_ref_ci_ifc(\r
- IN DEVICE_OBJECT* p_dev_obj )\r
-{\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- cl_atomic_inc( &p_ext->n_ci_ifc_ref );\r
- ObReferenceObject( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-al_deref_ci_ifc(\r
- IN DEVICE_OBJECT* p_dev_obj )\r
-{\r
- al_fdo_ext_t *p_ext;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_ext = p_dev_obj->DeviceExtension;\r
-\r
- cl_atomic_dec( &p_ext->n_ci_ifc_ref );\r
- ObDereferenceObject( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__query_ci_ifc(\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IO_STACK_LOCATION* const p_io_stack )\r
-{\r
- ib_ci_ifc_t *p_ifc;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-DbgPrint("%s() give up the ibal lower edge ci interface\n",__FUNCTION__);\r
-\r
- if( p_io_stack->Parameters.QueryInterface.Version != \r
- IB_CI_INTERFACE_VERSION )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_PNP, ("Incorrect interface version (%d)\n",\r
- p_io_stack->Parameters.QueryInterface.Version ) );\r
- return STATUS_NOT_SUPPORTED;\r
- }\r
-\r
- if( p_io_stack->Parameters.QueryInterface.Size < sizeof(ib_ci_ifc_t) )\r
- {\r
- AL_TRACE_EXIT( AL_DBG_PNP, \r
- ("Buffer too small (%d given, %d required).\n",\r
- p_io_stack->Parameters.QueryInterface.Size, sizeof(ib_ci_ifc_t)) );\r
- return STATUS_BUFFER_TOO_SMALL;\r
- }\r
-\r
- /* Copy the interface. */\r
- p_ifc = (ib_ci_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface;\r
-\r
- p_ifc->wdm.Size = sizeof(ib_ci_ifc_t);\r
- p_ifc->wdm.Version = IB_CI_INTERFACE_VERSION;\r
- p_ifc->wdm.Context = p_dev_obj;\r
- p_ifc->wdm.InterfaceReference = al_ref_ci_ifc;\r
- p_ifc->wdm.InterfaceDereference = al_deref_ci_ifc;\r
-\r
- /* Set the entry points. */\r
- p_ifc->register_ca = ib_register_ca;\r
- p_ifc->deregister_ca = ib_deregister_ca;\r
- /* XXX STAN - skip this as we are in the same device stack with remove\r
- * dependencies enforced by OS not by guid.\r
- */\r
- p_ifc->get_relations = NULL /*__get_relations*/;\r
- p_ifc->get_err_str = ib_get_err_str;\r
-\r
- /* take the reference before returning. */\r
- al_ref_ci_ifc( p_dev_obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return STATUS_SUCCESS;\r
-}\r
-\r
-#endif // TMP_CI_IFC\r
-\r
-\r
-static NTSTATUS\r
-fdo_query_interface (\r
- IN DEVICE_OBJECT* const p_dev_obj,\r
- IN IRP* const p_irp, \r
- OUT cl_irp_action_t* const p_action )\r
-{\r
- NTSTATUS status;\r
- IO_STACK_LOCATION *p_io_stack;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- PAGED_CODE();\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
- \r
- /* Compare requested GUID with our supported interface GUIDs. */\r
- if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType,\r
- &GUID_IB_AL_INTERFACE ) )\r
- {\r
- status = __query_al_ifc( p_dev_obj, p_io_stack );\r
- }\r
-#if TMP_CI_IFC\r
- else if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType,\r
- &GUID_IB_CI_INTERFACE ) )\r
- {\r
- status = __query_ci_ifc( p_dev_obj, p_io_stack );\r
- }\r
-#endif // TMP_CI_IFC\r
- else\r
- {\r
- status = p_irp->IoStatus.Status;\r
- KdPrint(("%s() GUID not interesting.\n",__FUNCTION__));\r
- }\r
-\r
- if( NT_SUCCESS( status ) )\r
- *p_action = IrpSkip;\r
- else if( status == STATUS_BUFFER_TOO_SMALL )\r
- *p_action = IrpComplete;\r
- else\r
- *p_action = IrpIgnore;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_driver.h 10 2005-05-24 00:33:03Z ftillier $\r
- */\r
-\r
-\r
-#if !defined _AL_DRIVER_H_\r
-#define _AL_DRIVER_H_\r
-\r
-#include <ntddk.h>\r
-\r
-#include "al_debug.h"\r
-#include "iba/ib_al_ifc.h"\r
-#include "iba/ib_ci_ifc.h"\r
-#include "rdma/verbs.h"\r
-#include "complib/cl_bus_ifc.h"\r
-#include "complib/cl_types.h"\r
-#include "complib/cl_atomic.h"\r
-#include "complib/cl_debug.h"\r
-#include "complib/cl_mutex.h"\r
-#include "complib/cl_qlist.h"\r
-#include "complib/cl_ptr_vector.h"\r
-#include "complib/cl_pnp_po.h"\r
-/* Safe string functions. */\r
-#if WINVER == 0x500\r
-/*\r
- * Windows 2000 doesn't support the inline version of safe strings.\r
- * Force the use of the library version of safe strings.\r
- */\r
-#define NTSTRSAFE_LIB\r
-#endif\r
-#include <ntstrsafe.h>\r
-\r
-\r
-/*\r
- * Main header for Access Layer driver.\r
- */\r
-\r
-/*\r
- * ALLOC_PRAGMA sections:\r
- * PAGE\r
- * Default pagable code. Won't be locked in memory.\r
- *\r
- * PAGE_PNP\r
- * Code that needs to be locked in memory when the device is\r
- * in the paging, crash dump, or hibernation path.\r
- */\r
-\r
-\r
-/*\r
- * Device extension for the device object that serves as entry point for \r
- * the interface and IOCTL requests.\r
- */\r
-typedef struct _al_fdo_ext\r
-{\r
- /* ****WARNING ****\r
- * Since cl_ext is the 1st element in the Device extension, much of ibal\r
- * has been coded to EXPECT the cl_ext to be the 1st element of the fdo\r
- * extension. To such a degree, often the fdo extension is refered to\r
- * as a cl_pnp_po_ext_t and not an al_fdo_ext_t; particallary during PNP\r
- * handling.\r
- * You have been warned!\r
- */\r
- cl_pnp_po_ext_t cl_ext;\r
-\r
- /*\r
- * Device power map returned by the bus driver for the device, used \r
- * when sending IRP_MN_SET_POWER for device state in response to \r
- * IRP_MN_SET_POWER for system state.\r
- */\r
- DEVICE_POWER_STATE po_state[PowerSystemMaximum];\r
-\r
- /* Mutex to protect the CA list. */\r
-// FAST_MUTEX caMutex;\r
-\r
- /* List of CAs. */\r
-// cl_qlist_t caList;\r
-\r
- /*\r
- * Interface names are generated by IoRegisterDeviceInterface.\r
- * Interface name for the upper edge (AL interface).\r
- */\r
- UNICODE_STRING al_ifc_name;\r
-\r
- UNICODE_STRING ci_ifc_name;\r
-\r
- /* Number of references on the AL/CI interface. */\r
- atomic32_t n_al_ifc_ref; /* references on the Al interface */\r
- atomic32_t n_ci_ifc_ref;\r
-\r
-} al_fdo_ext_t;\r
-//\r
-//\r
-//typedef struct _AlCaContext\r
-//{\r
-// cl_list_item_t listItem;\r
-// void *ibtContext;\r
-// const IB_VERBS_INTERFACE_STANDARD2 *pCi;\r
-//\r
-// /* Number of ports, used to size the DEVICE_RELATIONS structure. */\r
-// uint32_t nPorts;\r
-//\r
-// DEVICE_OBJECT *pHcaPdo;\r
-//\r
-//} AlCaInfo_t;\r
-\r
-\r
-/*\r
- * Device extension for bus driver PDOs.\r
- */\r
-typedef struct _al_pdo_ext\r
-{\r
- cl_pnp_po_ext_t cl_ext;\r
-\r
- cl_list_item_t list_item;\r
-\r
- POWER_STATE dev_po_state;\r
-\r
- /*\r
- * Pointer to the bus root device extension. Used to manage access to\r
- * child PDO pointer vector when a child is removed politely.\r
- */\r
- al_fdo_ext_t *p_parent_ext;\r
-\r
- /*\r
- * The following two flags are exclusively set, but can both be FALSE.\r
- * Flag that indicates whether the device is present in the system or not.\r
- * This affects how a IRP_MN_REMOVE_DEVICE IRP is handled for a child PDO.\r
- * This flag is cleared when:\r
- * - an HCA (for IPoIB devices) is removed from the system for all port\r
- * devices loaded for that HCA\r
- * - an IOU is reported as removed by the CIA.\r
- */\r
- boolean_t b_present;\r
-\r
- /*\r
- * Flag that indicates whether the device has been reported to the PnP\r
- * manager as having been removed. That is, the device was reported\r
- * in a previous BusRelations query and not in a subsequent one.\r
- * This flag is set when\r
- * - the device is in the surprise remove state when the parent bus\r
- * device is removed\r
- * - the device is found to be not present during a BusRelations query\r
- * and thus not reported.\r
- */\r
- boolean_t b_reported_missing;\r
-\r
-} al_pdo_ext_t;\r
-\r
-\r
-/*\r
- * Device extension for IPoIB port PDOs.\r
- */\r
-typedef struct _al_port_ext\r
-{\r
- al_pdo_ext_t pdo;\r
-\r
- net64_t port_guid;\r
- uint32_t n_port;\r
-\r
- /* Number of references on the upper interface. */\r
- atomic32_t n_ifc_ref;\r
-\r
- ib_ca_handle_t h_ca;\r
-\r
-} al_port_ext_t;\r
-\r
-\r
-/*\r
- * Global Driver parameters.\r
- */\r
-typedef struct _al_globals\r
-{\r
- /* Driver object. Used for registering of Plug and Play notifications. */\r
- DRIVER_OBJECT *p_driver_obj;\r
-\r
- /* Flag to control loading of Ip Over Ib driver for each HCA port. */\r
-//XXX STAN uint32_t b_report_port_nic;\r
-\r
-\r
- /* Pointer to the one and only AL root. XXX - multi HCA support? */\r
- al_fdo_ext_t *p_al_ext;\r
-\r
-} al_globals_t;\r
-\r
-\r
-extern al_globals_t al_globals;\r
-\r
-/* enable CI interface as a workaround to use unmodified mthca driver\r
- * see al_driver.c\r
- *\r
- * 1 enables AL to export CI interface; how to do bus relations?\r
- */\r
-#define TMP_CI_IFC 1\r
-\r
-#endif /* !defined _AL_DRIVER_H_ */\r
+++ /dev/null
-LIBRARY ibal.sys\r
-\r
-EXPORTS\r
-; DllInitialize and DllUnload must be exported for the OS reference counting to\r
-; work, and must be private for the compiler to accept them.\r
-DllInitialize private\r
-DllUnload private\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $\r
- */\r
-\r
-\r
-\r
-\r
-#include "al_debug.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_fmr_pool.tmh"\r
-#endif\r
-\r
-#include "al_fmr_pool.h"\r
-#include "al_mr.h"\r
-#include "al_pd.h"\r
-\r
-#define hash_mix(a, b, c) \\r
- { \\r
- a -= b; a -= c; a ^= (c>>13); \\r
- b -= c; b -= a; b ^= (a<<8); \\r
- c -= a; c -= b; c ^= (b>>13); \\r
- a -= b; a -= c; a ^= (c>>12); \\r
- b -= c; b -= a; b ^= (a<<16); \\r
- c -= a; c -= b; c ^= (b>>5); \\r
- a -= b; a -= c; a ^= (c>>3); \\r
- b -= c; b -= a; b ^= (a<<10); \\r
- c -= a; c -= b; c ^= (b>>15); \\r
-}\r
-\r
-static inline uint32_t hash_2words(uint32_t a, uint32_t b, uint32_t c)\r
-{\r
- a += 0x9e3779b9;\r
- b += 0x9e3779b9;\r
- hash_mix(a, b, c);\r
- return c;\r
-}\r
-\r
-enum {\r
- IB_FMR_MAX_REMAPS = 32,\r
-\r
- IB_FMR_HASH_BITS = 8,\r
- IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,\r
- IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1\r
-};\r
-\r
-\r
-static inline uint32_t __fmr_hash(uint64_t first_page)\r
-{\r
- return hash_2words((uint32_t) first_page, (uint32_t) (first_page >> 32), 0) &\r
- (IB_FMR_HASH_SIZE - 1);\r
-}\r
-\r
-/* Caller must hold pool_lock */\r
-static inline mlnx_fmr_pool_element_t *__fmr_cache_lookup(\r
- mlnx_fmr_pool_t *p_pool,\r
- const uint64_t* const page_list,\r
- int page_list_len,\r
- uint64_t io_virtual_address)\r
-{\r
- cl_qlist_t *bucket;\r
- cl_list_item_t *p_list_item;\r
- mlnx_fmr_pool_element_t *p_fmr_el;\r
-\r
- if (!p_pool->cache_bucket)\r
- return NULL;\r
-\r
- bucket = p_pool->cache_bucket + __fmr_hash(*page_list);\r
-\r
- for( p_list_item = cl_qlist_head( bucket );\r
- p_list_item != cl_qlist_end( bucket);\r
- p_list_item = cl_qlist_next( p_list_item ) )\r
- {\r
- p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, cache_node );\r
- if (io_virtual_address == p_fmr_el->io_virtual_address &&\r
- page_list_len == p_fmr_el->page_list_len &&\r
- !memcmp(page_list, p_fmr_el->page_list, page_list_len * sizeof *page_list))\r
- return p_fmr_el;\r
- }\r
-\r
- return NULL;\r
-}\r
-\r
-\r
-static void \r
-__fmr_pool_batch_release(mlnx_fmr_pool_t *p_pool)\r
-{\r
- ib_api_status_t status;\r
- mlnx_fmr_pool_element_t *p_fmr_el;\r
- mlnx_fmr_handle_t h_fmr = NULL;\r
- cl_qlist_t unmap_list;\r
- cl_list_item_t *p_list_item;\r
- cl_qlist_t *bucket;\r
-\r
- cl_qlist_init(&unmap_list);\r
- \r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
-\r
- for( p_list_item = cl_qlist_head( &p_pool->dirty_list );\r
- p_list_item != cl_qlist_end( &p_pool->dirty_list);\r
- p_list_item = cl_qlist_next( p_list_item ) )\r
- {\r
- p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item );\r
- if (p_fmr_el->in_cash)\r
- {\r
- p_fmr_el->in_cash = FALSE;\r
- bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);\r
- cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );\r
- }\r
- p_fmr_el->remap_count = 0;\r
- p_fmr_el->h_fmr->p_next = h_fmr;\r
- h_fmr = p_fmr_el->h_fmr;\r
- if (p_fmr_el->ref_count !=0) \r
- {\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("Unmapping FMR 0x%p with ref count %d",\r
- p_fmr_el, p_fmr_el->ref_count));\r
- }\r
- }\r
-\r
- cl_qlist_insert_list_head(&unmap_list, &p_pool->dirty_list );\r
- cl_qlist_init(&p_pool->dirty_list);\r
- p_pool->dirty_len = 0;\r
-\r
- cl_spinlock_release( &p_pool->pool_lock );\r
-\r
- if (cl_is_qlist_empty(&unmap_list)) {\r
- return;\r
- }\r
-\r
- status = mlnx_unmap_fmr(h_fmr);\r
- if (status != IB_SUCCESS)\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s", ib_get_err_str(status)));\r
-\r
-\r
- cl_spinlock_acquire( &p_pool->pool_lock );\r
- cl_qlist_insert_list_head(&p_pool->free_list,&unmap_list);\r
- cl_spinlock_release( &p_pool->pool_lock );\r
-}\r
-\r
-\r
-\r
-static int \r
-__fmr_cleanup_thread(void * p_pool_ptr)\r
-{\r
- mlnx_fmr_pool_t *p_pool = p_pool_ptr;\r
- atomic32_t flush_req;\r
- int forever = 1;\r
-\r
- do {\r
- flush_req = 0;\r
- if (p_pool->flush_req || p_pool->dirty_len >= p_pool->dirty_watermark)\r
- {\r
- __fmr_pool_batch_release(p_pool);\r
-\r
- if (p_pool->flush_req) \r
- {\r
- cl_event_signal(&p_pool->flush_done_event);\r
- flush_req = cl_atomic_dec( &p_pool->flush_req );\r
- }\r
- \r
- if (p_pool->flush_function)\r
- p_pool->flush_function( (mlnx_fmr_pool_handle_t)p_pool, p_pool->flush_arg);\r
- }\r
-\r
- if (!flush_req)\r
- {\r
- if (p_pool->should_stop)\r
- break;\r
- cl_event_wait_on(&p_pool->do_flush_event, EVENT_NO_TIMEOUT, TRUE);\r
- }\r
- } while (forever);\r
-\r
- return 0;\r
-}\r
-\r
-/*\r
- * Destroying the pool.\r
- */\r
-static void\r
-__destroying_fmr_pool(\r
- IN al_obj_t* p_obj )\r
-{\r
- mlnx_fmr_pool_t* p_pool;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );\r
- AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool));\r
-\r
- // notify cleaning thread to exit\r
- cl_atomic_inc( &p_pool->should_stop );\r
- cl_event_signal(&p_pool->do_flush_event);\r
- cl_thread_destroy(&p_pool->thread);\r
-}\r
-\r
-/*\r
- * Cleanup the pool.\r
- */\r
-static void\r
-__cleanup_fmr_pool(\r
- IN al_obj_t* p_obj )\r
-{\r
- int i=0;\r
- ib_api_status_t status = IB_SUCCESS;\r
- mlnx_fmr_pool_t* p_pool;\r
- mlnx_fmr_pool_element_t *p_fmr_el;\r
- cl_list_item_t *p_list_item;\r
- cl_qlist_t *bucket;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );\r
- AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool));\r
-\r
- // cleanup the dirty list stuff\r
- __fmr_pool_batch_release(p_pool);\r
-\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
-\r
- // merge the rest with free list\r
- for( p_list_item = cl_qlist_head( &p_pool->rest_list );\r
- p_list_item != cl_qlist_end( &p_pool->rest_list );\r
- p_list_item = cl_qlist_head( &p_pool->rest_list ) )\r
- {\r
- p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item );\r
- if (p_fmr_el->in_cash)\r
- {\r
- p_fmr_el->in_cash = FALSE;\r
- bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);\r
- cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );\r
- }\r
- cl_qlist_remove_item(&p_pool->rest_list, p_list_item);\r
- cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->free_list;\r
- }\r
-\r
- // cleanup the free list\r
- for( p_list_item = cl_qlist_head( &p_pool->free_list );\r
- p_list_item != cl_qlist_end( &p_pool->free_list );\r
- p_list_item = cl_qlist_head( &p_pool->free_list ) )\r
- {\r
- p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item);\r
- cl_spinlock_release( &p_pool->pool_lock );\r
- if (p_fmr_el->remap_count)\r
- {\r
- p_fmr_el->h_fmr->p_next = NULL;\r
- status = mlnx_unmap_fmr(p_fmr_el->h_fmr);\r
- if (status != IB_SUCCESS)\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s\n", ib_get_err_str(status)));\r
-\r
- }\r
- status = mlnx_destroy_fmr(p_fmr_el->h_fmr);\r
- if (status != IB_SUCCESS)\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_destroy_fmr returned %s\n", ib_get_err_str(status)));\r
-\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
- cl_qlist_remove_item(&p_pool->free_list, p_list_item);\r
- cl_free(p_fmr_el);\r
- ++i;\r
- }\r
-\r
- cl_spinlock_release( &p_pool->pool_lock );\r
-\r
- if (i < p_pool->pool_size)\r
- AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool still has %d regions registered\n",\r
- p_pool->pool_size - i));\r
-}\r
-\r
-\r
-/*\r
- * Free the pool.\r
- */\r
-static void\r
-__free_fmr_pool(\r
- IN al_obj_t* p_obj )\r
-{\r
- mlnx_fmr_pool_t* p_pool;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );\r
-\r
- cl_spinlock_destroy(&p_pool->pool_lock);\r
- destroy_al_obj( &p_pool->obj );\r
- if (p_pool->cache_bucket)\r
- cl_free( p_pool->cache_bucket );\r
- cl_free( p_pool );\r
- AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("__free_pool: pool %p\n", p_pool));\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-mlnx_create_fmr_pool(\r
- IN const ib_pd_handle_t h_pd,\r
- IN const mlnx_fmr_pool_create_t *p_fmr_pool_attr,\r
- OUT mlnx_fmr_pool_handle_t* const ph_pool )\r
-{\r
- ib_api_status_t status = IB_SUCCESS;\r
- mlnx_fmr_pool_t *p_pool;\r
- int i;\r
- int max_remaps;\r
- cl_status_t cl_status;\r
- mlnx_fmr_pool_element_t *p_fmr_el;\r
-\r
-\r
- AL_ENTER( AL_DBG_FMR_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
- status = IB_INVALID_AL_HANDLE;\r
- goto end;\r
- }\r
-\r
- if( !ph_pool )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- status = IB_INVALID_PARAMETER;\r
- goto end;\r
- }\r
-\r
- if( !p_fmr_pool_attr || !p_fmr_pool_attr->dirty_watermark)\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- status = IB_INVALID_PARAMETER;\r
- goto end;\r
- }\r
-\r
- if (!h_pd->obj.p_ci_ca || !h_pd->obj.p_ci_ca->p_pnp_attr) \r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_STATE\n") );\r
- status = IB_INVALID_STATE;\r
- goto end;\r
- }\r
- \r
- // check whether the device support FMR\r
- if (!h_pd->obj.p_ci_ca->verbs.alloc_mlnx_fmr|| !h_pd->obj.p_ci_ca->verbs.dealloc_mlnx_fmr ||\r
- !h_pd->obj.p_ci_ca->verbs.map_phys_mlnx_fmr || !h_pd->obj.p_ci_ca->verbs.unmap_mlnx_fmr) {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Device does not support fast memory regions"));\r
- status = IB_UNSUPPORTED;\r
- goto end;\r
- }\r
-\r
- if (!h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr)\r
- {\r
- max_remaps = IB_FMR_MAX_REMAPS;\r
- }\r
- else\r
- {\r
- max_remaps = h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr;\r
- }\r
-\r
- // allocate pool object\r
- p_pool = cl_zalloc( sizeof( mlnx_fmr_pool_t ) );\r
- if( !p_pool )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Couldn't allocate pool struct"));\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_alloc_pool_obj;\r
- }\r
-\r
- // construct pool objects\r
- cl_spinlock_construct( &p_pool->pool_lock);\r
- cl_thread_construct(&p_pool->thread);\r
- cl_event_construct(&p_pool->do_flush_event);\r
- cl_event_construct(&p_pool->flush_done_event);\r
-\r
-\r
- // init pool objects\r
- p_pool->pool_size = 0;\r
- p_pool->max_pages = p_fmr_pool_attr->max_pages_per_fmr;\r
- p_pool->max_remaps = max_remaps;\r
- p_pool->dirty_watermark = p_fmr_pool_attr->dirty_watermark;\r
- p_pool->dirty_len = 0;\r
- p_pool->cache_bucket = NULL;\r
- p_pool->flush_function = p_fmr_pool_attr->flush_function;\r
- p_pool->flush_arg = p_fmr_pool_attr->flush_arg;\r
- cl_qlist_init(&p_pool->dirty_list);\r
- cl_qlist_init(&p_pool->free_list);\r
- cl_qlist_init(&p_pool->rest_list);\r
-\r
- if (p_fmr_pool_attr->cache) {\r
- p_pool->cache_bucket =\r
- cl_zalloc(IB_FMR_HASH_SIZE * sizeof *p_pool->cache_bucket);\r
- if (!p_pool->cache_bucket) {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate cache in pool"));\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_alloc_cache;\r
- }\r
-\r
- for (i = 0; i < IB_FMR_HASH_SIZE; ++i)\r
- cl_qlist_init(p_pool->cache_bucket + i);\r
- }\r
-\r
- cl_status = cl_spinlock_init( &p_pool->pool_lock );\r
- if( cl_status != CL_SUCCESS ) \r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_spinlock_init"));\r
- status = IB_ERROR;\r
- goto err_pool_init;\r
- }\r
-\r
- cl_event_init(&p_pool->do_flush_event,FALSE);\r
- if( cl_status != CL_SUCCESS ) \r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));\r
- status = IB_ERROR;\r
- goto err_pool_init;\r
- }\r
-\r
- cl_event_init(&p_pool->flush_done_event,FALSE);\r
- if( cl_status != CL_SUCCESS ) \r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));\r
- status = IB_ERROR;\r
- goto err_pool_init;\r
- }\r
-\r
- cl_thread_init(&p_pool->thread ,__fmr_cleanup_thread,p_pool,"fmr_cleanup");\r
- if( cl_status != CL_SUCCESS ) \r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_thread_init"));\r
- status = IB_ERROR;\r
- goto err_pool_init;\r
- }\r
-\r
- {\r
- mlnx_fmr_create_t fmr_attr;\r
- \r
- fmr_attr.max_pages = p_fmr_pool_attr->max_pages_per_fmr,\r
- fmr_attr.max_maps = p_pool->max_remaps,\r
- fmr_attr.page_size = p_fmr_pool_attr->page_size;\r
- fmr_attr.access_ctrl = p_fmr_pool_attr->access_ctrl;\r
-\r
-\r
- for (i = 0; i < p_fmr_pool_attr->pool_size; ++i)\r
- {\r
- p_fmr_el = cl_zalloc(sizeof (mlnx_fmr_pool_element_t) + p_fmr_pool_attr->max_pages_per_fmr * sizeof (uint64_t));\r
- if (!p_fmr_el)\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, (" failed to allocate struct for FMR %d \n",i));\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto err_alloc_cache_el;\r
- }\r
-\r
- p_fmr_el->h_pool = (mlnx_fmr_pool_handle_t)p_pool;\r
- p_fmr_el->remap_count = 0;\r
- p_fmr_el->ref_count = 0;\r
-\r
- status = mlnx_create_fmr(h_pd, &fmr_attr,&p_fmr_el->h_fmr);\r
- if (status != IB_SUCCESS)\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("mlnx_create_fmr failed for FMR %d with status %s.\n",i,ib_get_err_str(status)));\r
- cl_free(p_fmr_el);\r
- goto err_alloc_cache_el;\r
- }\r
-\r
- cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->free_list;\r
- ++p_pool->pool_size;\r
- }\r
-\r
- }\r
-\r
- /* Do IBAL stuff for creating and iniitializing the object */\r
- construct_al_obj( &p_pool->obj, AL_OBJ_TYPE_H_FMR_POOL);\r
-\r
- status = init_al_obj( &p_pool->obj, p_pool, FALSE, __destroying_fmr_pool, __cleanup_fmr_pool, __free_fmr_pool );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
- goto err_init_al_obj;\r
- }\r
-\r
- /* Attach the pool to the AL object. */\r
- status = attach_al_obj( &h_pd->obj, &p_pool->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- ref_al_obj( &p_pool->obj );\r
- p_pool->obj.pfn_destroy( &p_pool->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- goto end;\r
- }\r
-\r
-\r
- /* Release the reference taken in init_al_obj */\r
- deref_al_obj( &p_pool->obj );\r
-\r
- *ph_pool = p_pool;\r
- status = IB_SUCCESS;\r
- goto end;\r
-\r
-err_init_al_obj:\r
- destroy_al_obj( &p_pool->obj );\r
-\r
-err_alloc_cache_el:\r
- __destroying_fmr_pool( &p_pool->obj );\r
- __cleanup_fmr_pool( &p_pool->obj );\r
-\r
-err_pool_init:\r
- if (p_pool->cache_bucket)\r
- cl_free( p_pool->cache_bucket );\r
-\r
-err_alloc_cache: \r
- cl_free( p_pool );\r
-\r
-err_alloc_pool_obj:\r
-end:\r
- AL_EXIT( AL_DBG_FMR_POOL );\r
- return status;\r
-}\r
-\r
-/**\r
- * ib_destroy_fmr_pool - Free FMR pool\r
- * @pool:FMR pool to free\r
- *\r
- * Destroy an FMR pool and free all associated resources.\r
- */\r
-ib_api_status_t\r
-mlnx_destroy_fmr_pool(\r
- IN const mlnx_fmr_pool_handle_t h_pool)\r
-{\r
- mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool;\r
-\r
- AL_ENTER( AL_DBG_FMR_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- ref_al_obj( &p_pool->obj );\r
- p_pool->obj.pfn_destroy( &p_pool->obj, NULL );\r
-\r
- AL_EXIT( AL_DBG_FMR_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-mlnx_flush_fmr_pool(mlnx_fmr_pool_handle_t h_pool)\r
-{\r
-\r
- ib_api_status_t status = IB_SUCCESS;\r
- mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool;\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- ref_al_obj( &p_pool->obj );\r
-\r
- cl_atomic_inc( &p_pool->flush_req );\r
- cl_event_signal(&p_pool->do_flush_event);\r
- if (cl_event_wait_on(&p_pool->flush_done_event, EVENT_NO_TIMEOUT, TRUE))\r
- status = IB_ERROR;\r
-\r
- deref_al_obj( &p_pool->obj );\r
-\r
- return status;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_map_phys_fmr_pool(\r
- IN const mlnx_fmr_pool_handle_t h_pool ,\r
- IN const uint64_t* const page_list,\r
- IN const int list_len,\r
- IN OUT uint64_t* const p_vaddr,\r
- OUT net32_t* const p_lkey,\r
- OUT net32_t* const p_rkey,\r
- OUT mlnx_fmr_pool_el_t *pp_fmr_el)\r
-{\r
-\r
- ib_api_status_t status = IB_SUCCESS;\r
- mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool;\r
- mlnx_fmr_pool_element_t *p_fmr_el;\r
- cl_qlist_t *bucket;\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- if (list_len < 1 || list_len > p_pool->max_pages)\r
- return IB_INVALID_PARAMETER;\r
-\r
- ref_al_obj( &p_pool->obj );\r
-\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
-\r
- p_fmr_el = __fmr_cache_lookup( p_pool, page_list, list_len, *p_vaddr );\r
- if (p_fmr_el) {\r
- /* found in cache */\r
- ++p_fmr_el->ref_count;\r
- if (p_fmr_el->ref_count == 1) {\r
- cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item );\r
- cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->rest_list;\r
- }\r
-\r
- cl_spinlock_release(&p_pool->pool_lock);\r
- goto end;\r
- }\r
- \r
- if (cl_is_qlist_empty(&p_pool->free_list)) {\r
- cl_spinlock_release(&p_pool->pool_lock);\r
- status = IB_RESOURCE_BUSY;\r
- goto exit;\r
- }\r
-\r
- p_fmr_el = PARENT_STRUCT(cl_qlist_remove_head(&p_pool->free_list),mlnx_fmr_pool_element_t,list_item);\r
- if (p_fmr_el->in_cash)\r
- {\r
- p_fmr_el->in_cash = FALSE;\r
- bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);\r
- cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );\r
- }\r
- cl_spinlock_release(&p_pool->pool_lock);\r
-\r
- status = mlnx_map_phys_fmr(p_fmr_el->h_fmr, page_list,\r
- list_len, p_vaddr, p_lkey, p_rkey);\r
-\r
- if (status != IB_SUCCESS) {\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
- cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->free_list;\r
- cl_spinlock_release(&p_pool->pool_lock);\r
- goto exit;\r
- }\r
-\r
- ++p_fmr_el->remap_count;\r
- p_fmr_el->ref_count = 1;\r
- p_fmr_el->lkey = *p_lkey;\r
- p_fmr_el->rkey = *p_rkey;\r
- p_fmr_el->io_virtual_address = *p_vaddr;\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
- cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->rest_list;\r
- cl_spinlock_release(&p_pool->pool_lock);\r
-\r
- if (p_pool->cache_bucket) {\r
- p_fmr_el->io_virtual_address = *p_vaddr;\r
- p_fmr_el->page_list_len = list_len;\r
- memcpy(p_fmr_el->page_list, page_list, list_len * sizeof(*page_list));\r
-\r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
- bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);\r
- cl_qlist_insert_head( bucket, &p_fmr_el->cache_node );\r
- p_fmr_el->in_cash = TRUE;\r
- cl_spinlock_release(&p_pool->pool_lock);\r
- }\r
-\r
-end:\r
- *pp_fmr_el = (mlnx_fmr_pool_el_t)p_fmr_el;\r
- *p_lkey = p_fmr_el->lkey;\r
- *p_rkey = p_fmr_el->rkey;\r
- *p_vaddr = p_fmr_el->io_virtual_address;\r
- \r
-exit:\r
- deref_al_obj( &p_pool->obj );\r
- return status;\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-mlnx_unmap_fmr_pool(\r
- IN mlnx_fmr_pool_el_t p_fmr_el )\r
-{\r
- mlnx_fmr_pool_t *p_pool;\r
-\r
- p_pool = (mlnx_fmr_pool_t*)p_fmr_el->h_pool;\r
-\r
- if( AL_OBJ_INVALID_HANDLE( (mlnx_fmr_pool_handle_t)p_pool, AL_OBJ_TYPE_H_FMR_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- ref_al_obj( &p_pool->obj );\r
- \r
- cl_spinlock_acquire(&p_pool->pool_lock);\r
-\r
- --p_fmr_el->ref_count;\r
- if (!p_fmr_el->ref_count) \r
- {\r
- if (p_fmr_el->p_cur_list == &p_pool->rest_list)\r
- cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item );\r
-\r
- if (p_fmr_el->remap_count < p_pool->max_remaps) \r
- {\r
- cl_qlist_insert_tail(&p_pool->free_list,&p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->free_list;\r
- }\r
- else\r
- {\r
- cl_qlist_insert_tail(&p_pool->dirty_list, &p_fmr_el->list_item);\r
- p_fmr_el->p_cur_list = &p_pool->dirty_list;\r
- ++p_pool->dirty_len;\r
- cl_event_signal(&p_pool->do_flush_event);\r
- }\r
- }\r
-\r
- if (p_fmr_el->ref_count < 0)\r
- {\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("FMR %p has ref count %d < 0\n",p_fmr_el, p_fmr_el->ref_count));\r
- }\r
- cl_spinlock_release( &p_pool->pool_lock );\r
-\r
- deref_al_obj( &p_pool->obj );\r
- return IB_SUCCESS;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $\r
- */\r
-\r
-\r
-#if !defined(__AL_FMR_POOL_H__)\r
-#define __AL_FMR_POOL_H__\r
-\r
-#include <complib/cl_qlist.h>\r
-#include <iba/ib_al.h>\r
-#include "al_common.h"\r
-\r
-\r
-/*\r
- * If an FMR is not in use, then the list member will point to either\r
- * its pool's free_list (if the FMR can be mapped again; that is,\r
- * remap_count < pool->max_remaps) or its pool's dirty_list (if the\r
- * FMR needs to be unmapped before being remapped). In either of\r
- * these cases it is a bug if the ref_count is not 0. In other words,\r
- * if ref_count is > 0, then the list member must not be linked into\r
- * either free_list or dirty_list.\r
- *\r
- * The cache_node member is used to link the FMR into a cache bucket\r
- * (if caching is enabled). This is independent of the reference\r
- * count of the FMR. When a valid FMR is released, its ref_count is\r
- * decremented, and if ref_count reaches 0, the FMR is placed in\r
- * either free_list or dirty_list as appropriate. However, it is not\r
- * removed from the cache and may be "revived" if a call to\r
- * ib_fmr_register_physical() occurs before the FMR is remapped. In\r
- * this case we just increment the ref_count and remove the FMR from\r
- * free_list/dirty_list.\r
- *\r
- * Before we remap an FMR from free_list, we remove it from the cache\r
- * (to prevent another user from obtaining a stale FMR). When an FMR\r
- * is released, we add it to the tail of the free list, so that our\r
- * cache eviction policy is "least recently used."\r
- *\r
- * All manipulation of ref_count, list and cache_node is protected by\r
- * pool_lock to maintain consistency.\r
- */\r
-\r
-#pragma warning( disable : 4200)\r
-typedef struct _mlnx_fmr_pool_element {\r
- mlnx_fmr_handle_t h_fmr;\r
- mlnx_fmr_pool_handle_t h_pool;\r
- cl_list_item_t list_item;\r
- cl_qlist_t *p_cur_list;\r
- cl_list_item_t cache_node;\r
- boolean_t in_cash;\r
- int ref_count;\r
- int remap_count;\r
- uint64_t io_virtual_address;\r
- net32_t lkey;\r
- net32_t rkey;\r
- int page_list_len;\r
- uint64_t page_list[0];\r
-} mlnx_fmr_pool_element_t;\r
-#pragma warning( default : 4200)\r
-\r
-\r
-typedef struct _mlnx_fmr_pool {\r
-\r
- al_obj_t obj; /* Child of ib_al_handle_t */\r
- cl_spinlock_t pool_lock;\r
-\r
- int pool_size;\r
- int max_pages;\r
- int max_remaps;\r
- int dirty_watermark;\r
- int dirty_len;\r
- cl_qlist_t free_list;\r
- cl_qlist_t dirty_list;\r
- cl_qlist_t rest_list; /* those, that not in free and not in dirty */\r
- cl_qlist_t *cache_bucket;\r
-\r
- void (*flush_function) (mlnx_fmr_pool_handle_t h_pool,void* arg);\r
- void *flush_arg;\r
-\r
- cl_thread_t thread;\r
- cl_event_t do_flush_event;\r
- cl_event_t flush_done_event;\r
- atomic32_t flush_req;\r
- atomic32_t should_stop;\r
-} mlnx_fmr_pool_t;\r
-\r
-\r
-#endif /* IB_FMR_POOL_H */\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_ioc_pnp.c 940 2008-02-11 11:29:01Z leonidk $\r
- */\r
-\r
-\r
-#include <iba/ib_al.h>\r
-#include "al_pnp.h"\r
-#include "al_ioc_pnp.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_ioc_pnp.tmh"\r
-#endif\r
-#include "ib_common.h"\r
-#include "al_mgr.h"\r
-#include "al_ca.h"\r
-#include <complib/cl_timer.h>\r
-#include <complib/cl_qpool.h>\r
-#include <complib/cl_qmap.h>\r
-#include <complib/cl_fleximap.h>\r
-#include <complib/cl_math.h>\r
-\r
-\r
-/* Basic sweep operation flow:\r
- *\r
- * NOTE: Empty lines indicate asynchronous decoupling.\r
- * 1. Timer expires\r
- * 2. Issue SA query for all CA nodes\r
- * 3. Issue SA query for all paths\r
- *\r
- * 4. Query callback for first query - store results.\r
- * 5. Query callback for second query - process results.\r
- * 6. Associate paths to nodes.\r
- * 7. For each node, use the first path to send a IOU Info query.\r
- *\r
- * 8a. Recv callback (success) - record IOU info, decrement ref count.\r
- * 8b. Recv callback (failure) - decrement ref count.\r
- * 8c. Send failure - decrement ref count.\r
- * 8d. Send timeout - pick next path and repeate IOU info query.\r
- * 9. Queue results to async proc thread once ref count hits zero\r
- *\r
- * 10. Discard any nodes that failed IOU info query, or reported no IOCs.\r
- * 11. For each node scanned that is already known, compare change ID\r
- * 12a. Change ID identical - report any path changes.\r
- * 12b. Change ID different - for each active IOC slot, query IOC profile.\r
- *\r
- * 13a. Recv callback (success) - associate IOC with IOU, decrement ref count.\r
- * 13b. Recv callback (failure) - decrement ref count.\r
- * 13c. Send failure - decrement ref count.\r
- * 14. Queue results to async proc thread once ref count hits zero.\r
- *\r
- * 15. Discard any nodes that have no IOCs.\r
- * 16. For each IOC of each node, query all service entries.\r
- *\r
- * 17a. Recv callback (success) - copy service entries, decrement ref count.\r
- * 17b. Recv callback (failure) - Remove IOC from IOU, decrement ref count.\r
- * 17c. Send failure - Remove IOC from IOU, decrement ref count.\r
- * 18. Queue results to async proc thread once ref count hits zero.\r
- *\r
- * 19. Discard any nodes that have no IOCs.\r
- * 20. Compare new node map to known nodes and report changes.\r
- * 21. Compare IOCs for any duplicates and report changes.\r
- * 22. Compare paths for any duplicates and report changes.\r
- * 23. Reset sweep timer.\r
- *\r
- * Note: the sweep timer is reset at any point where there can be no further\r
- * progress towards.\r
- */\r
-\r
-\r
-/* Number of entries in the various pools to grow by. */\r
-#define IOC_PNP_POOL_GROW (10)\r
-\r
-\r
-/* IOC PnP Manager structure. */\r
-typedef struct _ioc_pnp_mgr\r
-{\r
- al_obj_t obj;\r
-\r
- cl_qlist_t iou_reg_list;\r
- cl_qlist_t ioc_reg_list;\r
-\r
- ib_pnp_handle_t h_pnp;\r
-\r
- cl_async_proc_item_t async_item;\r
- boolean_t async_item_is_busy;\r
-\r
- cl_spinlock_t iou_pool_lock;\r
- cl_qpool_t iou_pool;\r
- cl_spinlock_t ioc_pool_lock;\r
- cl_qpool_t ioc_pool;\r
- cl_spinlock_t path_pool_lock;\r
- cl_qpool_t path_pool;\r
-\r
- cl_fmap_t iou_map; /* Map of currently known IOUs */\r
- cl_fmap_t sweep_map; /* Map of IOUs from sweep results. */\r
- cl_timer_t sweep_timer;/* Timer to trigger sweep. */\r
- atomic32_t query_cnt; /* Number of sweep results outstanding. */\r
-\r
-} ioc_pnp_mgr_t;\r
-\r
-\r
-/* Per-port IOC PnP agent. */\r
-typedef struct _ioc_pnp_svc\r
-{\r
- al_obj_t obj;\r
-\r
- net64_t ca_guid;\r
- net64_t port_guid;\r
-\r
- ib_qp_handle_t h_qp;\r
- ib_pool_key_t pool_key;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- atomic32_t query_cnt;\r
- ib_query_handle_t h_node_query;\r
- ib_query_handle_t h_path_query;\r
- ib_mad_element_t *p_node_element;\r
- ib_mad_element_t *p_path_element;\r
- uint32_t num_nodes;\r
- uint32_t num_paths;\r
-\r
-} ioc_pnp_svc_t;\r
-\r
-\r
-/****d* Access Layer:IOC PnP/iou_path_t\r
-* NAME\r
-* iou_path_t\r
-*\r
-* DESCRIPTION\r
-* Describes a path to an IOU node.\r
-*\r
-* SYNOPSIS\r
-*/\r
-typedef struct _iou_path\r
-{\r
- cl_fmap_item_t map_item;\r
- net64_t ca_guid;\r
- net64_t port_guid;\r
- ib_path_rec_t rec;\r
-\r
-} iou_path_t;\r
-/*\r
-* FIELDS\r
-* map_item\r
-* Map item for storing paths in a map.\r
-*\r
-* path_rec\r
-* Path record.\r
-*\r
-* SEE ALSO\r
-* IOC PnP\r
-*********/\r
-\r
-\r
-/****d* Access Layer:IOC PnP/iou_node_t\r
-* NAME\r
-* iou_node_t\r
-*\r
-* DESCRIPTION\r
-* Describes an IOU node on the fabric.\r
-*\r
-* SYNOPSIS\r
-*/\r
-typedef struct _iou_node\r
-{\r
- cl_fmap_item_t map_item;\r
- cl_fmap_t path_map;\r
- cl_qmap_t ioc_map;\r
- cl_spinlock_t lock;\r
-\r
- iou_path_t *p_config_path;\r
-\r
- net64_t ca_guid;\r
- net64_t guid;\r
- net64_t chassis_guid;\r
- uint8_t slot;\r
- net32_t vend_id;\r
- net16_t dev_id;\r
- net32_t revision;\r
- ib_iou_info_t info;\r
-\r
- char desc[IB_NODE_DESCRIPTION_SIZE + 1];\r
-\r
-} iou_node_t;\r
-/*\r
-* FIELDS\r
-* map_item\r
-* Map item for storing IOUs in a map.\r
-*\r
-* path_map\r
-* Map of paths to the IOU.\r
-*\r
-* ioc_map\r
-* Map of IOCs on the IOU.\r
-*\r
-* p_config_path\r
-* Path used to get configuration information from the IOU.\r
-*\r
-* ca_guid\r
-* CA GUID through which the IOU is accessible.\r
-*\r
-* guid\r
-* Node GUID used as key when storing IOUs in the map.\r
-*\r
-* chassis_guid\r
-* GUID of the chassis in which the IOU is installed.\r
-*\r
-* slot\r
-* Slot number in the chassis in which the IOU is installed.\r
-*\r
-* vend_id\r
-* Vendor ID of the IOU.\r
-*\r
-* dev_id\r
-* Device ID of the IOU.\r
-*\r
-* revision\r
-* Device revision of the IOU.\r
-*\r
-* info\r
-* I/O unit info structure.\r
-*\r
-* desc\r
-* Node description as provided in ib_node_record_t, along with space for\r
-* terminating NULL.\r
-*\r
-* NOTES\r
-* The guid member must follow the ca_guid member to allow both guids to\r
-* be compared in single call to cl_memcmp.\r
-*\r
-* SEE ALSO\r
-* IOC PnP\r
-*********/\r
-\r
-\r
-#pragma warning(disable:4324)\r
-typedef struct _iou_ioc\r
-{\r
- cl_map_item_t map_item;\r
- iou_node_t *p_iou;\r
- uint8_t slot;\r
- ib_ioc_profile_t profile;\r
- uint8_t num_valid_entries;\r
- ib_svc_entry_t *p_svc_entries;\r
- atomic32_t ref_cnt;\r
-\r
-} iou_ioc_t;\r
-#pragma warning(default:4324)\r
-\r
-\r
-typedef enum _sweep_state\r
-{\r
- SWEEP_IOU_INFO,\r
- SWEEP_IOC_PROFILE,\r
- SWEEP_SVC_ENTRIES,\r
- SWEEP_COMPLETE\r
-\r
-} sweep_state_t;\r
-\r
-\r
-typedef struct _ioc_sweep_results\r
-{\r
- cl_async_proc_item_t async_item;\r
- sweep_state_t state;\r
- ioc_pnp_svc_t *p_svc;\r
- atomic32_t query_cnt;\r
- cl_fmap_t iou_map;\r
-\r
-} ioc_sweep_results_t;\r
-\r
-\r
-typedef struct _al_pnp_ioc_event\r
-{\r
- size_t rec_size;\r
- ib_pnp_rec_t *p_rec;\r
- ib_pnp_rec_t *p_user_rec;\r
-\r
-} al_pnp_ioc_event_t;\r
-\r
-\r
-/* Global instance of the IOC PnP manager. */\r
-ioc_pnp_mgr_t *gp_ioc_pnp = NULL;\r
-uint32_t g_ioc_query_timeout = 250;\r
-uint32_t g_ioc_query_retries = 4;\r
-uint32_t g_ioc_poll_interval = 30000;\r
-\r
-\r
-\r
-/******************************************************************************\r
-*\r
-* IOC PnP Manager functions - global object.\r
-*\r
-******************************************************************************/\r
-static void\r
-__construct_ioc_pnp(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr );\r
-\r
-static ib_api_status_t\r
-__init_ioc_pnp(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr );\r
-\r
-static void\r
-__destroying_ioc_pnp(\r
- IN al_obj_t *p_obj );\r
-\r
-static void\r
-__free_ioc_pnp(\r
- IN al_obj_t *p_obj );\r
-\r
-static ib_api_status_t\r
-__ioc_pnp_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec );\r
-\r
-static cl_status_t\r
-__init_iou(\r
- IN void* const p_obj,\r
- IN void* context,\r
- OUT cl_pool_item_t** const pp_pool_item );\r
-\r
-/******************************************************************************\r
-*\r
-* IOC PnP manager sweep-related functions.\r
-*\r
-******************************************************************************/\r
-static iou_node_t*\r
-__get_iou(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN const net64_t ca_guid,\r
- IN const ib_node_record_t* const p_node_rec );\r
-\r
-static void\r
-__put_iou(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN iou_node_t* const p_iou );\r
-\r
-static void\r
-__put_iou_map(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN cl_fmap_t* const p_iou_map );\r
-\r
-static iou_path_t*\r
-__get_path(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN const net64_t ca_guid,\r
- IN const net64_t port_guid,\r
- IN const ib_path_rec_t* const p_path_rec );\r
-\r
-static void\r
-__put_path(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN iou_path_t* const p_path );\r
-\r
-static void\r
-__put_path_map(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN cl_fmap_t* const p_path_map );\r
-\r
-static iou_ioc_t*\r
-__get_ioc(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN const uint32_t ioc_slot,\r
- IN const ib_ioc_profile_t* const p_profile );\r
-\r
-static void\r
-__put_ioc(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN iou_ioc_t* const p_ioc );\r
-\r
-static void\r
-__put_ioc_map(\r
- IN ioc_pnp_mgr_t* const p_ioc_mgr,\r
- IN cl_qmap_t* const p_ioc_map );\r
-\r
-static intn_t\r
-__iou_cmp(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 );\r
-\r
-static intn_t\r
-__path_cmp(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 );\r
-\r
-static void\r
-__ioc_pnp_timer_cb(\r
- IN void *context );\r
-\r
-static void\r
-__ioc_async_cb(\r
- IN cl_async_proc_item_t *p_async_item );\r
-\r
-/******************************************************************************\r
-*\r
-* IOC PnP service - per local port child of IOC PnP manager.\r
-*\r
-******************************************************************************/\r
-static ib_api_status_t\r
-__create_ioc_pnp_svc(\r
- IN ib_pnp_rec_t *p_pnp_rec );\r
-\r
-static ib_api_status_t\r
-__init_ioc_pnp_svc(\r
- IN ioc_pnp_svc_t* const p_ioc_pnp_svc,\r
- IN const ib_pnp_rec_t* const p_pnp_rec );\r
-\r
-static void\r
-__destroying_ioc_pnp_svc(\r
- IN al_obj_t *p_obj );\r
-\r
-static void\r
-__free_ioc_pnp_svc(\r
- IN al_obj_t *p_obj );\r
-\r
-/******************************************************************************\r
-*\r
-* IOC PnP service sweep functions.\r
-*\r
-******************************************************************************/\r
-static void\r
-__ioc_pnp_recv_cb(\r
- IN const ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_request_mad );\r
-\r
-static void\r
-__ioc_pnp_send_cb(\r
- IN const ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad_response );\r
-\r
-static void\r
-__node_rec_cb(\r
- IN ib_query_rec_t *p_query_rec );\r
-\r
-static void\r
-__path_rec_cb(\r
- IN ib_query_rec_t *p_query_rec );\r
-\r
-static void\r
-__process_sweep(\r
- IN cl_async_proc_item_t *p_async_item );\r
-\r
-static void\r
-__process_query(\r
- IN ioc_pnp_svc_t* const p_svc );\r
-\r
-static void\r
-__process_nodes(\r
- IN ioc_pnp_svc_t* const p_svc,\r
- IN cl_qmap_t* const p_iou_map );\r
-\r
-static void\r
-__process_paths(\r
- IN ioc_pnp_svc_t* const p_svc,\r
- IN cl_qmap_t* const p_iou_map );\r
-\r
-static void\r
-__build_iou_map(\r
- IN cl_qmap_t* const p_port_map,\r
- IN OUT cl_fmap_t* const p_iou_map );\r
-\r
-static void\r
-__format_dm_get(\r
- IN const void* const context1,\r
- IN const void* const context2,\r
- IN const iou_path_t* const p_path,\r
- IN const net16_t attr_id,\r
- IN const net32_t attr_mod,\r
- IN OUT ib_mad_element_t* const p_mad_element );\r
-\r
-static ib_api_status_t\r
-__ioc_query_sa(\r
- IN ioc_pnp_svc_t* const p_svc );\r
-\r
-static ib_api_status_t\r
-__query_ious(\r
- IN ioc_sweep_results_t* const p_results );\r
-\r
-static ib_api_status_t\r
-__query_ioc_profiles(\r
- IN ioc_sweep_results_t* const p_results );\r
-\r
-static ib_api_status_t\r
-__query_svc_entries(\r
- IN ioc_sweep_results_t* const p_results );\r
-\r
-static void\r
-__update_results(\r
- IN ioc_sweep_results_t* const p_results );\r
-\r
-static void\r
-__iou_info_resp(\r
- IN OUT iou_node_t* const p_iou,\r
- IN const ib_dm_mad_t* const p_mad );\r
-\r
-static void\r
-__ioc_profile_resp(\r
- IN OUT iou_node_t* const p_iou,\r
- IN const ib_dm_mad_t* const p_mad );\r
-\r
-static void\r
-__svc_entry_resp(\r
- IN OUT iou_ioc_t* const p_ioc,\r
- IN const ib_dm_mad_t* const p_mad );\r
-\r
-/******************************************************************************\r
-*\r
-* Client registration and notification management\r
-*\r
-******************************************************************************/\r
-static void\r
-__change_ious(\r
- IN cl_fmap_t* const p_cur_ious,\r
- IN cl_fmap_t* const p_dup_ious );\r
-\r
-static void\r
-__add_ious(\r
- IN cl_fmap_t* const p_cur_ious,\r
- IN cl_fmap_t* const p_new_ious,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__remove_ious(\r
- IN cl_fmap_t* const p_old_ious );\r
-\r
-static void\r
-__add_iocs(\r
- IN iou_node_t* const p_iou,\r
- IN cl_qmap_t* const p_new_iocs,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__remove_iocs(\r
- IN iou_node_t* const p_iou,\r
- IN cl_qmap_t* const p_old_iocs );\r
-\r
-static void\r
-__add_paths(\r
- IN iou_node_t* const p_iou,\r
- IN cl_qmap_t* const p_ioc_map,\r
- IN cl_fmap_t* const p_new_paths,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__add_ioc_paths(\r
- IN iou_ioc_t* const p_ioc,\r
- IN cl_fmap_t* const p_new_paths,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__remove_paths(\r
- IN cl_qmap_t* const p_ioc_map,\r
- IN cl_fmap_t* const p_old_paths );\r
-\r
-static void\r
-__report_iou_add(\r
- IN iou_node_t* const p_iou,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__report_iou_remove(\r
- IN iou_node_t* const p_iou );\r
-\r
-static void\r
-__report_ioc_add(\r
- IN iou_node_t* const p_iou,\r
- IN iou_ioc_t* const p_ioc,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-static void\r
-__report_ioc_remove(\r
- IN iou_node_t* const p_iou,\r
- IN iou_ioc_t* const p_ioc );\r
-\r
-static void\r
-__report_path(\r
- IN iou_ioc_t* const p_ioc,\r
- IN iou_path_t* const p_path,\r
- IN ib_pnp_event_t pnp_event,\r
- IN al_pnp_t* const p_reg OPTIONAL );\r
-\r
-\r
-/******************************************************************************\r
-*\r
-* Implementation\r
-*\r
-******************************************************************************/\r
/*
 * Create the global IOC PnP manager (gp_ioc_pnp): allocate and initialize
 * it, attach it to the parent AL object, and register for port PnP events
 * so IOU/IOC discovery sweeps can be driven by port state changes.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY, or the failing sub-call's
 * status.  On any failure after init, teardown goes through pfn_destroy.
 */
ib_api_status_t
create_ioc_pnp(
	IN	al_obj_t* const	p_parent_obj )
{
	ib_api_status_t	status;
	ib_pnp_req_t	pnp_req;

	AL_ENTER( AL_DBG_PNP );

	/* There must be only one IOC PnP manager instance. */
	CL_ASSERT( !gp_ioc_pnp );

	gp_ioc_pnp = (ioc_pnp_mgr_t*)cl_zalloc( sizeof(ioc_pnp_mgr_t) );
	if( !gp_ioc_pnp )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("Failed to allocate IOC PnP manager.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	__construct_ioc_pnp( gp_ioc_pnp );

	status = __init_ioc_pnp( gp_ioc_pnp );
	if( status != IB_SUCCESS )
	{
		/* Construct-only state: safe to free directly without destroy. */
		__free_ioc_pnp( &gp_ioc_pnp->obj );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("__construct_ioc_pnp returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	/* Attach to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_ioc_pnp->obj );
	if( status != IB_SUCCESS )
	{
		gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Register for port PnP notifications. */
	cl_memclr( &pnp_req, sizeof(pnp_req) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = gp_ioc_pnp;
	pnp_req.pfn_pnp_cb = __ioc_pnp_cb;
	status = ib_reg_pnp( gh_al, &pnp_req, &gp_ioc_pnp->h_pnp );
	if( status != IB_SUCCESS )
	{
		gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp failed with status %s.\n",
			ib_get_err_str( status )) );
		return status;
	}
	/*
	 * We don't release the reference taken in init_al_obj
	 * since PnP deregistration is asynchronous.
	 */

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
-\r
-\r
/*
 * Set the IOC PnP manager to a well-defined "constructed" state so that
 * __free_ioc_pnp can be called safely even if __init_ioc_pnp later fails
 * part-way through.  No failure paths here.
 */
static void
__construct_ioc_pnp(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr )
{
	AL_ENTER( AL_DBG_PNP );

	cl_qlist_init( &p_ioc_mgr->iou_reg_list );
	cl_qlist_init( &p_ioc_mgr->ioc_reg_list );
	cl_fmap_init( &p_ioc_mgr->iou_map, __iou_cmp );
	construct_al_obj( &p_ioc_mgr->obj, AL_OBJ_TYPE_IOC_PNP_MGR );
	cl_spinlock_construct( &p_ioc_mgr->iou_pool_lock );
	cl_spinlock_construct( &p_ioc_mgr->path_pool_lock );
	cl_spinlock_construct( &p_ioc_mgr->ioc_pool_lock );
	cl_qpool_construct( &p_ioc_mgr->iou_pool );
	cl_qpool_construct( &p_ioc_mgr->path_pool );
	cl_qpool_construct( &p_ioc_mgr->ioc_pool );
	cl_fmap_init( &p_ioc_mgr->sweep_map, __iou_cmp );
	cl_timer_construct( &p_ioc_mgr->sweep_timer );
	p_ioc_mgr->async_item.pfn_callback = __ioc_async_cb;

	AL_EXIT( AL_DBG_PNP );
}


/*
 * Initialize the IOC PnP manager's locks, object pools, sweep timer and
 * AL object.  On partial failure, already-initialized resources are NOT
 * released here; the caller (create_ioc_pnp) invokes __free_ioc_pnp,
 * which destroys everything from the constructed state.
 */
static ib_api_status_t
__init_ioc_pnp(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr )
{
	ib_api_status_t	status;
	cl_status_t		cl_status;

	AL_ENTER( AL_DBG_PNP );

	/* Initialize the pool locks. */
	cl_status = cl_spinlock_init( &p_ioc_mgr->iou_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_spinlock_init( &p_ioc_mgr->path_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_spinlock_init( &p_ioc_mgr->ioc_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Initialize the pools.  All start empty (initial/min size 0) and
	 * grow by IOC_PNP_POOL_GROW.  Only IOU items need a constructor.
	 */
	cl_status = cl_qpool_init( &p_ioc_mgr->iou_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_node_t), __init_iou, NULL, p_ioc_mgr );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_qpool_init( &p_ioc_mgr->path_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_path_t), NULL, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_qpool_init( &p_ioc_mgr->ioc_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_ioc_t), NULL, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	/* Initialize the sweep timer. */
	cl_status = cl_timer_init( &p_ioc_mgr->sweep_timer,
		__ioc_pnp_timer_cb, p_ioc_mgr );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed with %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	/* Init last: a live AL object switches cleanup to pfn_destroy. */
	status = init_al_obj( &p_ioc_mgr->obj, p_ioc_mgr, TRUE,
		__destroying_ioc_pnp, NULL, __free_ioc_pnp );
	if( status != IB_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
-\r
-\r
/*
 * "Destroying" callback for the IOC PnP manager's AL object: stop the
 * sweep timer and kick off asynchronous PnP deregistration.  The final
 * reference is released by the dereg completion (deref_al_obj), which
 * eventually triggers __free_ioc_pnp.
 */
static void
__destroying_ioc_pnp(
	IN	al_obj_t	*p_obj )
{
	ib_api_status_t	status;

	AL_ENTER( AL_DBG_PNP );

	UNUSED_PARAM( p_obj );
	CL_ASSERT( &gp_ioc_pnp->obj == p_obj );

	/* Stop the timer. */
	cl_timer_stop( &gp_ioc_pnp->sweep_timer );

	if( gp_ioc_pnp->h_pnp )
	{
		/* deref_al_obj runs when deregistration completes asynchronously. */
		status = ib_dereg_pnp( gp_ioc_pnp->h_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_PNP );
}


/*
 * Final cleanup for the IOC PnP manager.  Mirrors __construct_ioc_pnp/
 * __init_ioc_pnp in reverse: drain the IOU map back into its pools,
 * destroy timer, pools and locks, then free the manager and clear the
 * global pointer.
 */
static void
__free_ioc_pnp(
	IN	al_obj_t	*p_obj )
{
	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( &gp_ioc_pnp->obj == p_obj );

	/*
	 * Return all items from the maps to their pools before
	 * destroying the pools
	 */
	__put_iou_map( gp_ioc_pnp, &gp_ioc_pnp->iou_map );
	cl_timer_destroy( &gp_ioc_pnp->sweep_timer );
	cl_qpool_destroy( &gp_ioc_pnp->ioc_pool );
	cl_qpool_destroy( &gp_ioc_pnp->path_pool );
	cl_qpool_destroy( &gp_ioc_pnp->iou_pool );
	cl_spinlock_destroy( &gp_ioc_pnp->ioc_pool_lock );
	cl_spinlock_destroy( &gp_ioc_pnp->path_pool_lock );
	cl_spinlock_destroy( &gp_ioc_pnp->iou_pool_lock );
	destroy_al_obj( p_obj );
	cl_free( gp_ioc_pnp );
	gp_ioc_pnp = NULL;

	AL_EXIT( AL_DBG_PNP );
}
-\r
-\r
/*
 * Pool constructor for iou_node_t items (invoked by cl_qpool_init for the
 * IOU pool): construct the per-IOU lock and maps and hand the embedded
 * pool item back to the pool.  Returns the lock-init status.
 */
static cl_status_t
__init_iou(
	IN	void* const			p_obj,
	IN	void*				context,
	OUT	cl_pool_item_t** const	pp_pool_item )
{
	iou_node_t	*p_iou;

	UNUSED_PARAM( context );

	p_iou = (iou_node_t*)p_obj;

	cl_spinlock_construct( &p_iou->lock );
	cl_qmap_init( &p_iou->ioc_map );
	cl_fmap_init( &p_iou->path_map, __path_cmp );

	*pp_pool_item = &p_iou->map_item.pool_item;
	return cl_spinlock_init( &p_iou->lock );
}


/*
 * Fetch an IOU node from the pool and populate it from a node record
 * returned by the SA.  Returns NULL if the pool is exhausted and cannot
 * grow.  The caller owns the returned node until __put_iou.
 */
static iou_node_t*
__get_iou(
	IN	ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN	const net64_t				ca_guid,
	IN	const ib_node_record_t* const	p_node_rec )
{
	iou_node_t		*p_iou;
	cl_pool_item_t	*p_item;

	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->iou_pool );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
	if( !p_item )
		return NULL;

	/* NOTE(review): map_item is an fmap item; this relies on the pool_item
	 * being at the same offset in cl_map_item_t and cl_fmap_item_t. */
	p_iou = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
		iou_node_t, map_item );

	p_iou->ca_guid = ca_guid;
	p_iou->guid = p_node_rec->node_info.node_guid;
	p_iou->chassis_guid = p_node_rec->node_info.sys_guid;
	p_iou->vend_id = ib_node_info_get_vendor_id( &p_node_rec->node_info );
	p_iou->dev_id = p_node_rec->node_info.device_id;
	p_iou->revision = p_node_rec->node_info.revision;

	/* Clear stale IOU info from any previous use of this pool item. */
	cl_memclr( &p_iou->info, sizeof(ib_iou_info_t) );

	cl_memcpy( p_iou->desc, p_node_rec->node_desc.description,
		IB_NODE_DESCRIPTION_SIZE );

	/* The terminating NULL should never get overwritten. */
	/* (Assumes desc is sized IB_NODE_DESCRIPTION_SIZE + 1 — declared elsewhere.) */
	CL_ASSERT( p_iou->desc[IB_NODE_DESCRIPTION_SIZE] == '\0' );

	return p_iou;
}
-\r
-\r
/*
 * Return a single IOU node to the pool, first returning all of its paths
 * and IOCs to their respective pools.
 */
static void
__put_iou(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	iou_node_t* const		p_iou )
{
	__put_path_map( p_ioc_mgr, &p_iou->path_map );
	__put_ioc_map( p_ioc_mgr, &p_iou->ioc_map );

	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	cl_qpool_put( &p_ioc_mgr->iou_pool, &p_iou->map_item.pool_item );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
}


/*
 * Drain an entire IOU map, returning every IOU (and its paths/IOCs) to
 * the pools.  Items are batched on a local list so the pool lock is
 * taken only once for the final cl_qpool_put_list.
 */
static void
__put_iou_map(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	cl_fmap_t* const		p_iou_map )
{
	cl_qlist_t		list;
	cl_fmap_item_t	*p_item;
	iou_node_t		*p_iou;

	cl_qlist_init( &list );

	p_item = cl_fmap_head( p_iou_map );
	while( p_item != cl_fmap_end( p_iou_map ) )
	{
		cl_fmap_remove_item( p_iou_map, p_item );

		/* NOTE(review): inner PARENT_STRUCT uses cl_map_item_t although
		 * p_item is a cl_fmap_item_t (cf. __put_path_map, which uses
		 * cl_fmap_item_t).  Correct only if pool_item sits at the same
		 * offset in both types — confirm against complib headers. */
		p_iou = PARENT_STRUCT(
			PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
			iou_node_t, map_item );

		__put_path_map( p_ioc_mgr, &p_iou->path_map );
		__put_ioc_map( p_ioc_mgr, &p_iou->ioc_map );
		cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		p_item = cl_fmap_head( p_iou_map );
	}
	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->iou_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
}
-\r
-\r
/*
 * Fetch a path item from the pool and initialize it from an SA path
 * record.  Returns NULL if the pool is exhausted.  num_path and the
 * reserved fields are cleared so records compare equal across sweeps
 * (the whole ib_path_rec_t is the fmap key — see __path_cmp).
 */
static iou_path_t*
__get_path(
	IN	ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN	const net64_t				ca_guid,
	IN	const net64_t				port_guid,
	IN	const ib_path_rec_t* const	p_path_rec )
{
	cl_pool_item_t	*p_item;
	iou_path_t		*p_path;

	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->path_pool );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
	if( !p_item )
		return NULL;

	p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ),
		iou_path_t, map_item );

	/*
	 * Store the local CA and port GUID for this path to let recipients
	 * of a PATH_ADD event avoid a CA lookup based on GID.
	 */
	p_path->ca_guid = ca_guid;
	p_path->port_guid = port_guid;

	p_path->rec = *p_path_rec;
	/* Clear the num_path field since it is just "undefined". */
	p_path->rec.num_path = 0;
	/*
	 * Clear reserved fields in case they were set to prevent undue path
	 * thrashing.
	 */
	p_path->rec.resv0 = 0;
	p_path->rec.resv1 = 0;
	p_path->rec.resv2 = 0;

	return p_path;
}


/* Return a single path item to the pool. */
static void
__put_path(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	iou_path_t* const		p_path )
{
	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	cl_qpool_put( &p_ioc_mgr->path_pool, &p_path->map_item.pool_item );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
}


/*
 * Drain an entire path map back to the pool.  Items are batched on a
 * local list so the pool lock is taken only once.
 */
static void
__put_path_map(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	cl_fmap_t* const		p_path_map )
{
	cl_qlist_t		list;
	cl_fmap_item_t	*p_item;
	iou_path_t		*p_path;

	cl_qlist_init( &list );

	p_item = cl_fmap_head( p_path_map );
	while( p_item != cl_fmap_end( p_path_map ) )
	{
		cl_fmap_remove_item( p_path_map, p_item );

		p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ),
			iou_path_t, map_item );

		cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		p_item = cl_fmap_head( p_path_map );
	}
	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->path_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
}
-\r
-\r
/*
 * Fetch an IOC item from the pool and initialize it from an IOC profile.
 * Allocates the service-entry array up front (sized by num_svc_entries);
 * returns NULL if the profile has no service entries, the allocation
 * fails, or the pool is exhausted.  The returned IOC starts with a
 * reference count of 1 (released via __put_ioc).
 */
static iou_ioc_t*
__get_ioc(
	IN	ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN	const uint32_t				ioc_slot,
	IN	const ib_ioc_profile_t* const	p_profile )
{
	cl_pool_item_t	*p_item;
	iou_ioc_t		*p_ioc;
	ib_svc_entry_t	*p_svc_entries;

	if( !p_profile->num_svc_entries )
		return NULL;

	p_svc_entries =
		cl_zalloc( sizeof(ib_svc_entry_t) * p_profile->num_svc_entries );
	if( !p_svc_entries )
		return NULL;

	cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->ioc_pool );
	cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
	if( !p_item )
	{
		/* Don't leak the service-entry array on pool exhaustion. */
		cl_free( p_svc_entries );
		return NULL;
	}

	p_ioc = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
		iou_ioc_t, map_item );

	CL_ASSERT( !p_ioc->ref_cnt );

	/* IOC slots fit in 8 bits (DM attribute modifier encoding). */
	CL_ASSERT( !(ioc_slot >> 8) );
	p_ioc->slot = (uint8_t)ioc_slot;
	p_ioc->profile = *p_profile;
	p_ioc->num_valid_entries = 0;
	p_ioc->p_svc_entries = p_svc_entries;
	cl_atomic_inc( &p_ioc->ref_cnt );
	return p_ioc;
}


/*
 * Release one reference on an IOC; on the last reference, free its
 * service-entry array and return it to the pool.
 */
static void
__put_ioc(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	iou_ioc_t* const		p_ioc )
{
	if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 )
	{
		cl_free( p_ioc->p_svc_entries );

		cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
		cl_qpool_put( &p_ioc_mgr->ioc_pool, &p_ioc->map_item.pool_item );
		cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
	}
}


/*
 * Drain an entire IOC map, dropping one reference per IOC and returning
 * those that reach zero to the pool in a single batched put.  IOCs still
 * referenced elsewhere are simply removed from the map (their final
 * __put_ioc returns them later).
 */
static void
__put_ioc_map(
	IN	ioc_pnp_mgr_t* const	p_ioc_mgr,
	IN	cl_qmap_t* const		p_ioc_map )
{
	cl_qlist_t		list;
	cl_map_item_t	*p_item;
	iou_ioc_t		*p_ioc;

	cl_qlist_init( &list );

	p_item = cl_qmap_head( p_ioc_map );
	while( p_item != cl_qmap_end( p_ioc_map ) )
	{
		cl_qmap_remove_item( p_ioc_map, p_item );

		p_ioc = PARENT_STRUCT(
			PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
			iou_ioc_t, map_item );

		/* Inline of __put_ioc, batching pool returns on a local list. */
		if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 )
		{
			cl_free( p_ioc->p_svc_entries );
			cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		}
		p_item = cl_qmap_head( p_ioc_map );
	}
	cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->ioc_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
}
-\r
-\r
-/*\r
- * Compares two IOUs for inserts/lookups in a flexi map. Keys are the\r
- * address of the ca_guid, which is adjacent to the node GUID of the IOU.\r
- * This allows for a single call to cl_memcmp.\r
- */\r
-static intn_t\r
-__iou_cmp(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 )\r
-{\r
- return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) * 2 );\r
-}\r
-\r
-\r
-/*\r
- * Compares two paths for inserts/lookups in a flexi map.\r
- */\r
-static intn_t\r
-__path_cmp(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 )\r
-{\r
- return cl_memcmp( p_key1, p_key2, sizeof(ib_path_rec_t) );\r
-}\r
-\r
-\r
/*
 * Removes all paths and orphaned IOC/IOUs upon a port DOWN event.
 *
 * For each known IOU, strip out every path whose source GID matches the
 * downed port.  An IOU left with no paths is moved (with its paths) to a
 * temporary map and reported/freed via __remove_ious; otherwise only the
 * removed paths are reported via __remove_paths.  Runs on the PnP thread,
 * which serializes it against sweeps.
 */
static void
__process_port_down(
	IN	const net64_t	port_guid )
{
	cl_fmap_item_t	*p_path_item;
	cl_fmap_item_t	*p_iou_item;
	iou_node_t		*p_iou;
	iou_path_t		*p_path;
	cl_fmap_t		old_paths;
	cl_fmap_t		old_ious;

	AL_ENTER( AL_DBG_PNP );

	cl_fmap_init( &old_paths, __path_cmp );
	cl_fmap_init( &old_ious, __iou_cmp );

	p_iou_item = cl_fmap_head( &gp_ioc_pnp->iou_map );
	while( p_iou_item != cl_fmap_end( &gp_ioc_pnp->iou_map ) )
	{
		p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
		/*
		 * Note that it is safe to move to the next item even if we remove
		 * the IOU from the map since the map effectively maintains an ordered
		 * list of its contents.
		 */
		p_iou_item = cl_fmap_next( p_iou_item );

		/* Collect this IOU's paths that go through the downed port. */
		p_path_item = cl_fmap_head( &p_iou->path_map );
		while( p_path_item != cl_fmap_end( &p_iou->path_map ) )
		{
			p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item );
			p_path_item = cl_fmap_next( p_path_item );
			if( p_path->rec.sgid.unicast.interface_id == port_guid )
			{
				cl_fmap_remove_item( &p_iou->path_map, &p_path->map_item );
				cl_fmap_insert( &old_paths, &p_path->rec, &p_path->map_item );
			}
		}

		if( !cl_fmap_count( &p_iou->path_map ) )
		{
			/* Move the paths back to the IOU so that they get freed. */
			cl_fmap_merge( &p_iou->path_map, &old_paths );
			cl_fmap_remove_item( &gp_ioc_pnp->iou_map, &p_iou->map_item );
			cl_fmap_insert( &old_ious, &p_iou->ca_guid, &p_iou->map_item );
		}
		else
		{
			/* Report the removed paths. */
			__remove_paths( &p_iou->ioc_map, &old_paths );
		}
	}

	/* Report any removed IOUs. */
	__remove_ious( &old_ious );

	AL_EXIT( AL_DBG_PNP );
}
-\r
-\r
-/*\r
- * PnP callback for port event notifications.\r
- */\r
-static ib_api_status_t\r
-__ioc_pnp_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- ib_api_status_t status = IB_SUCCESS;\r
- cl_status_t cl_status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
- ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
- p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
-\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_PORT_ADD:\r
- /* Create the port service. */\r
- CL_ASSERT( !p_pnp_rec->context );\r
- status = __create_ioc_pnp_svc( p_pnp_rec );\r
- break;\r
-\r
- case IB_PNP_SM_CHANGE:\r
- case IB_PNP_PORT_ACTIVE:\r
- /* Initiate a sweep - delay a bit to allow the ports to come up. */\r
- if( g_ioc_poll_interval && !gp_ioc_pnp->query_cnt)\r
- {\r
- cl_status = cl_timer_start( &gp_ioc_pnp->sweep_timer, 250 );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- }\r
- break;\r
-\r
- case IB_PNP_PORT_DOWN:\r
- case IB_PNP_PORT_INIT:\r
- case IB_PNP_PORT_ARMED:\r
- CL_ASSERT( p_pnp_rec->context );\r
-\r
- /*\r
- * Report IOC and IOU remove events for any IOU/IOCs that only have\r
- * paths through this port. Note, no need to synchronize with a\r
- * sweep since synchronization is provided by the PnP thread.\r
- */\r
- __process_port_down( p_pnp_rec->guid );\r
- break;\r
-\r
- case IB_PNP_PORT_REMOVE:\r
- /* Destroy the port service. */\r
- ref_al_obj( &((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj );\r
- ((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy(\r
- &((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj, NULL );\r
- p_pnp_rec->context = NULL;\r
-\r
- default:\r
- break; /* Ignore other PNP events. */\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
/*
 * Initialize a per-port IOC PnP service: record the CA/port GUIDs for SA
 * queries, create a QP1 alias on the port and register a MAD service for
 * Device Management traffic.  On failure, resources already acquired
 * (e.g. the QP) are released by the caller's destroy path, not here.
 */
static ib_api_status_t
__init_ioc_pnp_svc(
	IN	ioc_pnp_svc_t* const		p_ioc_pnp_svc,
	IN	const ib_pnp_rec_t* const	p_pnp_rec )
{
	ib_api_status_t		status;
	ib_ca_handle_t		h_ca;
	ib_qp_create_t		qp_create;
	ib_mad_svc_t		mad_svc;
	ib_pnp_port_rec_t	*p_pnp_port_rec;

	AL_ENTER( AL_DBG_PNP );

	p_pnp_port_rec = PARENT_STRUCT( p_pnp_rec, ib_pnp_port_rec_t, pnp_rec );

	/* Store the CA and port GUID so we can issue SA queries. */
	p_ioc_pnp_svc->ca_guid = p_pnp_port_rec->p_ca_attr->ca_guid;
	p_ioc_pnp_svc->port_guid = p_pnp_rec->guid;

	/* Acquire the correct CI CA for this port. */
	h_ca = acquire_ca( p_pnp_port_rec->p_ca_attr->ca_guid );
	if( !h_ca )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );
		return IB_INVALID_GUID;
	}
	p_ioc_pnp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;

	/* Create the MAD QP. */
	cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
	qp_create.qp_type = IB_QPT_QP1_ALIAS;
	qp_create.sq_depth = p_pnp_port_rec->p_ca_attr->max_wrs;
	qp_create.sq_sge = 1;
	qp_create.sq_signaled = TRUE;
	/*
	 * We use the IOC PnP service's al_obj_t as the context to allow using
	 * deref_al_obj as the destroy callback.
	 */
	status = ib_get_spl_qp( h_ca->obj.p_ci_ca->h_pd_alias,
		p_pnp_port_rec->p_port_attr->port_guid, &qp_create,
		&p_ioc_pnp_svc->obj, NULL, &p_ioc_pnp_svc->pool_key,
		&p_ioc_pnp_svc->h_qp );

	/*
	 * Release the CI CA once we've allocated the QP. The CI CA will not
	 * go away while we hold the QP.
	 */
	deref_al_obj( &h_ca->obj );

	/* Check for failure allocating the QP. */
	if( status != IB_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_get_spl_qp failed with status %s\n",
			ib_get_err_str( status )) );
		return status;
	}
	/* Reference the port object on behalf of the QP. */
	ref_al_obj( &p_ioc_pnp_svc->obj );

	/* Create the MAD service. */
	cl_memclr( &mad_svc, sizeof(ib_mad_svc_t) );
	mad_svc.mad_svc_context = p_ioc_pnp_svc;
	mad_svc.pfn_mad_recv_cb = __ioc_pnp_recv_cb;
	mad_svc.pfn_mad_send_cb = __ioc_pnp_send_cb;
	status =
		ib_reg_mad_svc( p_ioc_pnp_svc->h_qp, &mad_svc,
		&p_ioc_pnp_svc->h_mad_svc );
	if( status != IB_SUCCESS )
	{
		/* QP cleanup happens in __destroying_ioc_pnp_svc via the caller. */
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_mad_svc failed with status %s\n",
			ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
-\r
-\r
-/*\r
- * Create a port agent for a given port.\r
- */\r
-static ib_api_status_t\r
-__create_ioc_pnp_svc(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- ioc_pnp_svc_t *p_ioc_pnp_svc;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* calculate size of port_cm struct */\r
- p_ioc_pnp_svc = (ioc_pnp_svc_t*)cl_zalloc( sizeof(ioc_pnp_svc_t) );\r
- if( !p_ioc_pnp_svc )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to cl_zalloc port CM agent.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- construct_al_obj( &p_ioc_pnp_svc->obj, AL_OBJ_TYPE_IOC_PNP_SVC );\r
-\r
- status = init_al_obj( &p_ioc_pnp_svc->obj, p_ioc_pnp_svc, TRUE,\r
- __destroying_ioc_pnp_svc, NULL, __free_ioc_pnp_svc );\r
- if( status != IB_SUCCESS )\r
- {\r
- __free_ioc_pnp_svc( &p_ioc_pnp_svc->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed with status %s.\n",\r
- ib_get_err_str( status )) );\r
- return status;\r
- }\r
-\r
- /* Attach to the global CM object. */\r
- status = attach_al_obj( &gp_ioc_pnp->obj, &p_ioc_pnp_svc->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- status = __init_ioc_pnp_svc( p_ioc_pnp_svc, p_pnp_rec );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("__init_data_svc failed with status %s.\n",\r
- ib_get_err_str( status )) );\r
- return status;\r
- }\r
-\r
- /* Set the PnP context to reference this service. */\r
- p_pnp_rec->context = p_ioc_pnp_svc;\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_ioc_pnp_svc->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
/*
 * "Destroying" callback for a per-port IOC PnP service: cancel any
 * outstanding SA queries and destroy the QP.  The QP's destroy callback
 * (deref_al_obj) releases the reference taken on the service's behalf.
 */
static void
__destroying_ioc_pnp_svc(
	IN	al_obj_t	*p_obj )
{
	ib_api_status_t	status;
	ioc_pnp_svc_t	*p_svc;

	CL_ASSERT( p_obj );
	p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj );

	if( p_svc->h_node_query )
		ib_cancel_query( gh_al, p_svc->h_node_query );

	if( p_svc->h_path_query )
		ib_cancel_query( gh_al, p_svc->h_path_query );

	/* Destroy the QP. */
	if( p_svc->h_qp )
	{
		status =
			ib_destroy_qp( p_svc->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
}


/*
 * Final cleanup for a per-port IOC PnP service.  All queries must have
 * completed (query_cnt == 0) before we get here.
 */
static void
__free_ioc_pnp_svc(
	IN	al_obj_t	*p_obj )
{
	ioc_pnp_svc_t*	p_svc;

	CL_ASSERT( p_obj );
	p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj );

	CL_ASSERT( !p_svc->query_cnt );

	destroy_al_obj( p_obj );
	cl_free( p_svc );
}
-\r
-\r
/*
 * Sweep-timer callback: start an SA query pair (node + path records) on
 * every per-port service attached to the manager.  The manager's
 * query_cnt is pre-charged by one so it cannot transiently hit zero while
 * queries are still being launched; when the last query completes, the
 * sweep continuation (async_item) is queued.
 */
static void
__ioc_pnp_timer_cb(
	IN	void	*context )
{
	ib_api_status_t	status;
	ioc_pnp_mgr_t	*p_mgr;
	cl_list_item_t	*p_item;
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_mgr = (ioc_pnp_mgr_t*)context;

	cl_spinlock_acquire( &p_mgr->obj.lock );
	if( p_mgr->obj.state == CL_DESTROYING )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
			("Destroying - not resetting timer.\n") );
		cl_spinlock_release( &p_mgr->obj.lock );
		return;
	}

	/* The previous sweep's results must have been fully consumed. */
	CL_ASSERT( !cl_fmap_count( &p_mgr->sweep_map ) );

	/* Pre-charge the ref count so that we don't toggle between 0 and 1. */
	cl_atomic_inc( &p_mgr->query_cnt );
	/* Take a reference on the object for the duration of the sweep process. */
	ref_al_obj( &p_mgr->obj );
	for( p_item = cl_qlist_head( &p_mgr->obj.obj_list );
		p_item != cl_qlist_end( &p_mgr->obj.obj_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_svc = PARENT_STRUCT( PARENT_STRUCT( p_item, al_obj_t, pool_item ),
			ioc_pnp_svc_t, obj );
		cl_atomic_inc( &p_mgr->query_cnt );
		status = __ioc_query_sa( p_svc );
		if( status != IB_SUCCESS )
			cl_atomic_dec( &p_mgr->query_cnt );
	}
	/* Release the reference we took and see if we're done sweeping. */
	if( !cl_atomic_dec( &p_mgr->query_cnt ) )
		cl_async_proc_queue( gp_async_pnp_mgr, &p_mgr->async_item );

	cl_spinlock_release( &p_mgr->obj.lock );

	AL_EXIT( AL_DBG_PNP );
}
-\r
-\r
-static ib_api_status_t\r
-__ioc_query_sa(\r
- IN ioc_pnp_svc_t* const p_svc )\r
-{\r
- ib_api_status_t status = IB_NOT_DONE;\r
- ib_query_req_t query;\r
- ib_user_query_t info;\r
- union _ioc_pnp_timer_cb_u\r
- {\r
- ib_node_record_t node_rec;\r
- ib_path_rec_t path_rec;\r
-\r
- } u;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( p_svc->h_node_query )\r
- return IB_NOT_DONE;\r
- if( p_svc->h_path_query )\r
- return IB_NOT_DONE;\r
-\r
- if( p_svc->obj.state == CL_DESTROYING )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
- ("Destroying - not resetting timer.\n") );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- info.method = IB_MAD_METHOD_GETTABLE;\r
- info.attr_id = IB_MAD_ATTR_NODE_RECORD;\r
- info.attr_size = sizeof(ib_node_record_t);\r
- info.comp_mask = IB_NR_COMPMASK_NODETYPE;\r
- info.p_attr = &u.node_rec;\r
-\r
- cl_memclr( &u.node_rec, sizeof(ib_node_record_t) );\r
- u.node_rec.node_info.node_type = IB_NODE_TYPE_CA;\r
-\r
- cl_memclr( &query, sizeof(ib_query_req_t) );\r
- query.query_type = IB_QUERY_USER_DEFINED;\r
- query.p_query_input = &info;\r
- query.port_guid = p_svc->port_guid;\r
- query.timeout_ms = g_ioc_query_timeout;\r
- query.retry_cnt = g_ioc_query_retries;\r
- query.query_context = p_svc;\r
- query.pfn_query_cb = __node_rec_cb;\r
-\r
- /* Reference the service for the node record query. */\r
- ref_al_obj( &p_svc->obj );\r
- cl_atomic_inc( &p_svc->query_cnt );\r
-\r
- status = ib_query( gh_al, &query, &p_svc->h_node_query );\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_atomic_dec( &p_svc->query_cnt );\r
- deref_al_obj( &p_svc->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,\r
- ("ib_query returned %s\n", ib_get_err_str( status )) );\r
- return status;\r
- }\r
-\r
- /* Setup the path query. */\r
- info.method = IB_MAD_METHOD_GETTABLE;\r
- info.attr_id = IB_MAD_ATTR_PATH_RECORD;\r
- info.attr_size = sizeof(ib_path_rec_t);\r
- info.comp_mask = IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUM_PATH;\r
- info.comp_mask = IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUM_PATH | \r
- IB_PR_COMPMASK_PKEY;\r
- info.p_attr = &u.path_rec;\r
-\r
- cl_memclr( &u.path_rec, sizeof(ib_path_rec_t) );\r
- ib_gid_set_default( &u.path_rec.sgid, p_svc->port_guid );\r
- /* Request all the paths available, setting the reversible bit. */\r
- u.path_rec.num_path = 0xFF;\r
- /* Request only paths from the default partition */\r
- u.path_rec.pkey = cl_hton16(IB_DEFAULT_PKEY);\r
-\r
- query.pfn_query_cb = __path_rec_cb;\r
-\r
- /* Reference the service for the node record query. */\r
- ref_al_obj( &p_svc->obj );\r
- cl_atomic_inc( &p_svc->query_cnt );\r
-\r
- status = ib_query( gh_al, &query, &p_svc->h_path_query );\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_atomic_dec( &p_svc->query_cnt );\r
- deref_al_obj( &p_svc->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,\r
- ("ib_query returned %s\n", ib_get_err_str( status )) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
/*
 * SA completion callback for the node-record query.  On success, adopt
 * the result MAD (released later by __process_query); otherwise return
 * it.  Whichever of the two sweep queries finishes last (query_cnt hits
 * zero) processes the combined results.
 */
static void
__node_rec_cb(
	IN	ib_query_rec_t	*p_query_rec )
{
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_svc = (ioc_pnp_svc_t* __ptr64)p_query_rec->query_context;

	if( p_svc->obj.state != CL_DESTROYING &&
		p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt )
	{
		CL_ASSERT( p_query_rec->p_result_mad );
		CL_ASSERT( !p_svc->p_node_element );
		CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL );
		p_svc->p_node_element = p_query_rec->p_result_mad;
		p_svc->num_nodes = p_query_rec->result_cnt;
	}
	else if( p_query_rec->p_result_mad )
	{
		ib_put_mad( p_query_rec->p_result_mad );
	}

	p_svc->h_node_query = NULL;
	if( !cl_atomic_dec( &p_svc->query_cnt ) )
	{
		/* The path query has already completed. Process the results. */
		__process_query( p_svc );
	}

	/* Release the reference taken for the query. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_PNP );
}


/*
 * SA completion callback for the path-record query.  Mirror image of
 * __node_rec_cb: adopt or return the result MAD, then process the sweep
 * results if the node query already finished.
 */
static void
__path_rec_cb(
	IN	ib_query_rec_t	*p_query_rec )
{
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_svc = (ioc_pnp_svc_t* __ptr64)p_query_rec->query_context;

	if( p_svc->obj.state != CL_DESTROYING &&
		p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt )
	{
		CL_ASSERT( p_query_rec->p_result_mad );
		CL_ASSERT( !p_svc->p_path_element );
		CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL );
		p_svc->p_path_element = p_query_rec->p_result_mad;
		p_svc->num_paths = p_query_rec->result_cnt;
	}
	else if( p_query_rec->p_result_mad )
	{
		ib_put_mad( p_query_rec->p_result_mad );
	}

	p_svc->h_path_query = NULL;
	if( !cl_atomic_dec( &p_svc->query_cnt ) )
	{
		/* The node query has already completed. Process the results. */
		__process_query( p_svc );
	}

	/* Release the reference taken for the query. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_PNP );
}
-\r
-/*
- * Process the combined NodeRecord and PathRecord results of a port sweep.
- * Builds the IOU map keyed by node GUID and launches the IOU Info queries.
- * Runs once per sweep, after both SA query callbacks have completed.
- */
-static void
-__process_query(
- IN ioc_pnp_svc_t* const p_svc )
-{
- ib_api_status_t status;
- ioc_sweep_results_t *p_results;
- cl_qmap_t port_map;
-
- AL_ENTER( AL_DBG_PNP );
-
- cl_qmap_init( &port_map );
-
- if( !p_svc->p_node_element || !p_svc->p_path_element )
- {
- /* One of the queries failed. Release the MADs and reset the timer. */
- if( p_svc->p_node_element )
- {
- ib_put_mad( p_svc->p_node_element );
- p_svc->p_node_element = NULL;
- }
-
- if( p_svc->p_path_element )
- {
- ib_put_mad( p_svc->p_path_element );
- p_svc->p_path_element = NULL;
- }
-
- /* Decrement the IOC PnP manager's query count. */
- if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
- cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
- AL_EXIT( AL_DBG_PNP );
- return;
- }
-
- /*
- * Allocate the sweep results structure to allow processing
- * asynchronously.
- */
- p_results = cl_zalloc( sizeof(ioc_sweep_results_t) );
- if( p_results )
- {
- p_results->async_item.pfn_callback = __process_sweep;
- p_results->p_svc = p_svc;
- cl_fmap_init( &p_results->iou_map, __iou_cmp );
-
- /* Build the map of nodes by port GUID. */
- __process_nodes( p_svc, &port_map );
-
- /* Build the map of paths for each node. */
- __process_paths( p_svc, &port_map );
-
- /* Collapse the map of nodes to be keyed by node GUID. */
- __build_iou_map( &port_map, &p_results->iou_map );
-
- /* Send the IOU Info queries to the nodes. */
- status = __query_ious( p_results );
- }
- else
- {
- status = IB_INSUFFICIENT_MEMORY;
- }
-
- /* Release the query result MADs now that we're done with them. */
- ib_put_mad( p_svc->p_node_element );
- ib_put_mad( p_svc->p_path_element );
- p_svc->p_node_element = NULL;
- p_svc->p_path_element = NULL;
-
- /* Note the case order: only the default (post-allocation failure)
-  * frees p_results; IB_INSUFFICIENT_MEMORY means it was never
-  * allocated, so that label is placed after the fall-through. */
- switch( status )
- {
- case IB_SUCCESS:
- break;
- default:
- CL_ASSERT( p_results );
- cl_free( p_results );
- /* Fall through */
- case IB_INSUFFICIENT_MEMORY:
- /* Decrement the IOC PnP manager's query count. */
- if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
- cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
- }
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Walk the NodeRecord query results and populate p_port_map with one
- * iou_node_t per responding port, keyed by port GUID (so the path
- * records can later be matched by DGID).
- */
-static void
-__process_nodes(
- IN ioc_pnp_svc_t* const p_svc,
- IN cl_qmap_t* const p_port_map )
-{
- iou_node_t *p_iou;
- ib_node_record_t *p_node_rec;
- uint32_t i;
- void *p_item;
-
- AL_ENTER( AL_DBG_PNP );
-
- CL_ASSERT( p_svc );
- CL_ASSERT( p_svc->p_node_element );
- CL_ASSERT( p_port_map );
-
- for( i = 0; i < p_svc->num_nodes; i++ )
- {
- p_node_rec = ib_get_query_node_rec( p_svc->p_node_element, i );
-
- /* Pool allocation failure ends the walk early; the partial map
-  * is still usable. */
- p_iou = __get_iou( gp_ioc_pnp, p_svc->ca_guid, p_node_rec );
- if( !p_iou )
- break;
-
- /*
- * We insert by port GUID, not node GUID so that we can match
- * to paths using DGID. Note that it is safe to cast between
- * a flexi-map item and a map item since the pointer to the key
- * in a flexi-map item is always a 64-bit pointer.
- */
- p_item = cl_qmap_insert(
- p_port_map, p_node_rec->node_info.port_guid,
- (cl_map_item_t*)&p_iou->map_item );
- if( p_item != &p_iou->map_item )
- {
- /* Duplicate node - discard. */
- __put_iou( gp_ioc_pnp, p_iou );
- }
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Walk the PathRecord query results and attach each path to the IOU whose
- * port GUID matches the path's DGID interface ID.  Paths to unknown ports
- * are skipped.
- *
- * Fix: the entry assertion previously checked p_svc->p_node_element, but
- * this routine consumes p_svc->p_path_element - assert the element it
- * actually reads so debug builds catch the right missing input.
- */
-static void
-__process_paths(
- IN ioc_pnp_svc_t* const p_svc,
- IN cl_qmap_t* const p_port_map )
-{
- iou_node_t *p_iou;
- iou_path_t *p_path;
- ib_path_rec_t *p_path_rec;
- uint32_t i;
- cl_map_item_t *p_iou_item;
- cl_fmap_item_t *p_item;
-
- AL_ENTER( AL_DBG_PNP );
-
- CL_ASSERT( p_svc );
- CL_ASSERT( p_svc->p_path_element );
- CL_ASSERT( p_port_map );
-
- for( i = 0; i < p_svc->num_paths; i++ )
- {
- p_path_rec = ib_get_query_path_rec( p_svc->p_path_element, i );
-
- /* Match the path's destination to a known IOU port. */
- p_iou_item =
- cl_qmap_get( p_port_map, p_path_rec->dgid.unicast.interface_id );
- if( p_iou_item == cl_qmap_end( p_port_map ) )
- continue;
-
- p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
-
- /* Pool allocation failure ends the walk early. */
- p_path = __get_path( gp_ioc_pnp, p_svc->ca_guid,
- p_svc->port_guid, p_path_rec );
- if( !p_path )
- break;
-
- p_item = cl_fmap_insert( &p_iou->path_map, &p_path->rec,
- &p_path->map_item );
- if( p_item != &p_path->map_item )
- {
- /* Duplicate path - discard. */
- __put_path( gp_ioc_pnp, p_path );
- }
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Collapse the port-GUID-keyed map built by __process_nodes into a map
- * keyed for the IOU map (one entry per IOU), merging the path maps of
- * IOUs that appeared under more than one port.
- */
-static void
-__build_iou_map(
- IN cl_qmap_t* const p_port_map,
- IN OUT cl_fmap_t* const p_iou_map )
-{
- cl_fmap_t map1, map2;
- void *p_item;
- iou_node_t *p_iou, *p_dup;
-
- AL_ENTER( AL_DBG_PNP );
-
- CL_ASSERT( !cl_fmap_count( p_iou_map ) );
-
- /* Scratch maps used by cl_fmap_delta when merging duplicates. */
- cl_fmap_init( &map1, __path_cmp );
- cl_fmap_init( &map2, __path_cmp );
-
- /*
- * Now collapse the map so that IOUs aren't repeated.
- * This is needed because the IOU map is keyed by port GUID, and thus
- * a multi-port IOU could be listed twice.
- */
- /* Merge the port map into a map of IOUs. */
- for( p_item = cl_qmap_head( p_port_map );
- p_item != cl_qmap_end( p_port_map );
- p_item = cl_qmap_head( p_port_map ) )
- {
- cl_qmap_remove_item( p_port_map, (cl_map_item_t*)p_item );
- p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );
-
- p_item = cl_fmap_insert( p_iou_map, &p_iou->ca_guid, p_item );
- if( p_item != &p_iou->map_item )
- {
- /* Duplicate IOU information - merge the paths. */
- p_dup = PARENT_STRUCT( p_item, iou_node_t, map_item );
- CL_ASSERT( p_dup != p_iou );
- cl_fmap_delta( &p_dup->path_map, &p_iou->path_map, &map1, &map2 );
- /*
- * The path map in p_iou->path_map is duplicate paths.
- * map1 contains paths unique to p_iou->path_map, map2 contains
- * paths unique to p_dup->path_map. Add the unique paths back to
- * p_dup->path_map since that IOU is already in the IOU map.
- * Note that we are keeping the p_dup IOU node.
- */
- cl_fmap_merge( &p_dup->path_map, &map1 );
- cl_fmap_merge( &p_dup->path_map, &map2 );
- /* All unique items should have merged without duplicates. */
- CL_ASSERT( !cl_fmap_count( &map1 ) );
- CL_ASSERT( !cl_fmap_count( &map2 ) );
-
- /* p_iou's remaining (duplicate) paths go back with it. */
- __put_iou( gp_ioc_pnp, p_iou );
- }
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Fill in a MAD element for a Device Management GET of the given
- * attribute, addressed along the supplied path.  context1/context2 are
- * carried through to the send/receive callbacks so processing can resume
- * when the query completes.
- */
-static void
-__format_dm_get(
- IN const void* const context1,
- IN const void* const context2,
- IN const iou_path_t* const p_path,
- IN const net16_t attr_id,
- IN const net32_t attr_mod,
- IN OUT ib_mad_element_t* const p_mad_element )
-{
- /* NOTE(review): non-atomic static counter; presumably all callers run
-  * serialized on the PnP thread - confirm before adding new call sites. */
- static uint64_t tid = 0;
-
- AL_ENTER( AL_DBG_PNP );
-
- /*
- * Context information so that we can continue processing when
- * the query completes.
- */
- p_mad_element->context1 = context1;
- p_mad_element->context2 = context2;
-
- /*
- * Set the addressing bits necessary for the mad service to
- * create the address vector
- */
- p_mad_element->h_av = NULL;
- p_mad_element->remote_sl = ib_path_rec_sl( &p_path->rec );
- p_mad_element->remote_lid = p_path->rec.dlid;
- p_mad_element->grh_valid = FALSE;
- p_mad_element->path_bits = p_path->rec.num_path;
-
- /* Request response processing. */
- p_mad_element->resp_expected = TRUE;
- p_mad_element->retry_cnt = g_ioc_query_retries;
- p_mad_element->timeout_ms = g_ioc_query_timeout;
-
- /* Set the destination information for the send. */
- p_mad_element->remote_qp = IB_QP1;
- p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
-
- /* Format the MAD payload. */
- cl_memclr( p_mad_element->p_mad_buf, sizeof(ib_dm_mad_t) );
- ib_mad_init_new( p_mad_element->p_mad_buf, IB_MCLASS_DEV_MGMT, 1,
- IB_MAD_METHOD_GET, cl_ntoh64( tid++ ), attr_id, attr_mod );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Send an IOU Info query on the first path to every IOU in the sweep
- * results.  IOUs with no paths are discarded.  Returns IB_ERROR if no
- * query could be sent; otherwise completion continues in the MAD
- * send/receive callbacks.
- */
-static ib_api_status_t
-__query_ious(
- IN ioc_sweep_results_t* const p_results )
-{
- ib_api_status_t status;
- iou_node_t *p_iou;
- iou_path_t *p_path;
- cl_fmap_item_t *p_iou_item;
- cl_fmap_item_t *p_path_item;
- ib_mad_element_t *p_mad, *p_mad_list = NULL;
-
- AL_ENTER( AL_DBG_PNP );
-
- p_results->state = SWEEP_IOU_INFO;
-
- /* Send a IOU Info query on the first path to every IOU. */
- p_iou_item = cl_fmap_head( &p_results->iou_map );
- while( p_iou_item != cl_fmap_end( &p_results->iou_map ) )
- {
- p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
- /* Advance before possibly removing the current item. */
- p_iou_item = cl_fmap_next( p_iou_item );
- if( !cl_fmap_count( &p_iou->path_map ) )
- {
- /* No paths for this node. Discard it. */
- cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
- __put_iou( gp_ioc_pnp, p_iou );
- continue;
- }
-
- p_path_item = cl_fmap_head( &p_iou->path_map );
-
- p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item );
-
- status = ib_get_mad( p_results->p_svc->pool_key,
- MAD_BLOCK_SIZE, &p_mad );
- if( status != IB_SUCCESS )
- {
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ib_get_mad for IOU Info query returned %s.\n",
- ib_get_err_str( status )) );
- break;
- }
-
- /* Remember which path we used so send failures can retry on
-  * the next one (see __ioc_pnp_send_cb). */
- p_iou->p_config_path = p_path;
- __format_dm_get( p_results, p_iou, p_path,
- IB_MAD_ATTR_IO_UNIT_INFO, 0, p_mad );
-
- /* Link the elements together. */
- p_mad->p_next = p_mad_list;
- p_mad_list = p_mad;
-
- cl_atomic_inc( &p_results->p_svc->query_cnt );
- }
-
- if( !p_mad_list )
- {
- AL_EXIT( AL_DBG_PNP );
- return IB_ERROR;
- }
-
- status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ib_send_mad returned %s\n", ib_get_err_str( status )) );
-
- /* If some sends succeeded, change the status. */
- if( p_mad_list != p_mad )
- status = IB_SUCCESS;
-
- /* Return the unsent MADs and drop their query counts.  If this
-  * releases the last count while earlier sends succeeded, queue
-  * the async processing ourselves. */
- while( p_mad )
- {
- p_mad_list = p_mad->p_next;
- p_mad->p_next = NULL;
- ib_put_mad( p_mad );
- if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
- status == IB_SUCCESS )
- {
- cl_async_proc_queue( gp_async_pnp_mgr,
- &p_results->async_item );
- }
- p_mad = p_mad_list;
- }
- }
- AL_EXIT( AL_DBG_PNP );
- return status;
-}
-\r
-\r
-/*
- * MAD service receive callback for Device Management responses during a
- * sweep.  Dispatches on the response's attribute ID; MADs with a non-zero
- * status are silently dropped (the paired send callback handles cleanup
- * and query counting).
- */
-static void
-__ioc_pnp_recv_cb(
- IN const ib_mad_svc_handle_t h_mad_svc,
- IN void *mad_svc_context,
- IN ib_mad_element_t *p_mad_response )
-{
- ioc_sweep_results_t *p_results;
- iou_node_t *p_iou;
- iou_ioc_t *p_ioc;
-
- AL_ENTER( AL_DBG_PNP );
-
- UNUSED_PARAM( h_mad_svc );
- UNUSED_PARAM( mad_svc_context );
- CL_ASSERT( !p_mad_response->p_next );
-
- /* send_context1/2 were set by __format_dm_get. */
- p_results = (ioc_sweep_results_t* __ptr64)p_mad_response->send_context1;
- if( !p_mad_response->p_mad_buf->status )
- {
- /* Query was successful */
- switch( p_mad_response->p_mad_buf->attr_id )
- {
- case IB_MAD_ATTR_IO_UNIT_INFO:
- p_iou = (iou_node_t* __ptr64)p_mad_response->send_context2;
- __iou_info_resp( p_iou,
- (ib_dm_mad_t*)p_mad_response->p_mad_buf );
- break;
-
- case IB_MAD_ATTR_IO_CONTROLLER_PROFILE:
- p_iou = (iou_node_t* __ptr64)p_mad_response->send_context2;
- __ioc_profile_resp( p_iou,
- (ib_dm_mad_t*)p_mad_response->p_mad_buf );
- break;
-
- case IB_MAD_ATTR_SERVICE_ENTRIES:
- p_ioc = (iou_ioc_t* __ptr64)p_mad_response->send_context2;
- __svc_entry_resp( p_ioc,
- (ib_dm_mad_t*)p_mad_response->p_mad_buf );
- break;
-
- default:
- break;
- }
- }
-
- ib_put_mad( p_mad_response );
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Handle a successful IOUnitInfo response: cache the payload on the IOU
- * node so the sweep's post-processing can read it.
- */
-static void
-__iou_info_resp(
- IN OUT iou_node_t* const p_iou,
- IN const ib_dm_mad_t* const p_mad )
-{
- const ib_iou_info_t *p_info;
-
- AL_ENTER( AL_DBG_PNP );
- /* The DM MAD data area holds the IOU info structure. */
- p_info = (const ib_iou_info_t*)p_mad->data;
- p_iou->info = *p_info;
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Handle a successful IOControllerProfile response: allocate an IOC
- * object from the pool and insert it into the IOU's IOC map, keyed by
- * IOC GUID.  Duplicates are returned to the pool.
- */
-static void
-__ioc_profile_resp(
- IN OUT iou_node_t* const p_iou,
- IN const ib_dm_mad_t* const p_mad )
-{
- iou_ioc_t *p_ioc;
- cl_map_item_t *p_item;
-
- AL_ENTER( AL_DBG_PNP );
- /* The attribute modifier carries the slot number of this IOC. */
- p_ioc = __get_ioc( gp_ioc_pnp, cl_ntoh32(p_mad->hdr.attr_mod),
- (ib_ioc_profile_t*)p_mad->data );
- if( p_ioc )
- {
- /* Need back link to process service entry failures. */
- p_ioc->p_iou = p_iou;
- cl_spinlock_acquire( &p_iou->lock );
- p_item = cl_qmap_insert( &p_iou->ioc_map,
- p_ioc->profile.ioc_guid, &p_ioc->map_item );
- cl_spinlock_release( &p_iou->lock );
- /* Return the IOC if it's a duplicate. */
- if( p_item != &p_ioc->map_item )
- __put_ioc( gp_ioc_pnp, p_ioc );
- }
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Handle a successful ServiceEntries response: copy the returned entries
- * (indices lo..hi, encoded in the attribute modifier) into the IOC's
- * service entry table and account for them.
- */
-static void
-__svc_entry_resp(
- IN OUT iou_ioc_t* const p_ioc,
- IN const ib_dm_mad_t* const p_mad )
-{
- uint16_t idx;
- uint8_t lo, hi;
- ib_svc_entries_t *p_svc_entries;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Decode the lo/hi entry indices requested by __query_svc_entries. */
- ib_dm_get_slot_lo_hi( p_mad->hdr.attr_mod, NULL, &lo, &hi );
- CL_ASSERT( (hi - lo) < SVC_ENTRY_COUNT );
- p_svc_entries = (ib_svc_entries_t*)p_mad->data;
-
- /* Copy the entries. */
- for( idx = lo; idx <= hi; idx++ )
- p_ioc->p_svc_entries[idx] = p_svc_entries->service_entry[idx - lo];
-
- /* Update the number of entries received so far. */
- p_ioc->num_valid_entries += (hi - lo) + 1;
- /* Pairs with the cl_atomic_inc in __query_svc_entries. */
- cl_atomic_dec(&p_ioc->ref_cnt);
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * MAD service send-completion callback for sweep queries.  On failure:
- * IOU Info timeouts are retried on the IOU's next path; failed service
- * entry queries discard the affected IOC.  When the last outstanding
- * query completes, processing resumes on the PnP thread.
- */
-static void
-__ioc_pnp_send_cb(
- IN const ib_mad_svc_handle_t h_mad_svc,
- IN void *mad_svc_context,
- IN ib_mad_element_t *p_request_mad )
-{
- ib_api_status_t status;
- ioc_sweep_results_t *p_results;
- iou_node_t *p_iou;
- iou_ioc_t *p_ioc;
- cl_fmap_item_t *p_item;
-
- AL_ENTER( AL_DBG_PNP );
-
- UNUSED_PARAM( h_mad_svc );
- UNUSED_PARAM( mad_svc_context );
-
- CL_ASSERT( p_request_mad->p_next == NULL );
-
- p_results = (ioc_sweep_results_t* __ptr64)p_request_mad->context1;
-
- if( p_request_mad->status != IB_WCS_SUCCESS )
- {
- switch( p_request_mad->p_mad_buf->attr_id )
- {
- case IB_MAD_ATTR_IO_UNIT_INFO:
- p_iou = (iou_node_t* __ptr64)p_request_mad->context2;
- if( p_request_mad->status == IB_WCS_TIMEOUT_RETRY_ERR )
- {
- /* Move to the next path for the node and try the query again. */
- p_item = cl_fmap_next( &p_iou->p_config_path->map_item );
- if( p_item != cl_fmap_end( &p_iou->path_map ) )
- {
- p_iou->p_config_path =
- PARENT_STRUCT( p_item, iou_path_t, map_item );
- /* Reuse the request MAD for the retry. */
- __format_dm_get( p_results, p_iou, p_iou->p_config_path,
- IB_MAD_ATTR_IO_UNIT_INFO, 0, p_request_mad );
-
- status = ib_send_mad( p_results->p_svc->h_mad_svc,
- p_request_mad, &p_request_mad );
- if( status == IB_SUCCESS )
- {
- /* Retry is in flight - keep the query count. */
- AL_EXIT( AL_DBG_PNP );
- return;
- }
- }
- }
- break;
-
- case IB_MAD_ATTR_SERVICE_ENTRIES:
- /* Service entry query failed - drop the IOC entirely. */
- p_ioc = (iou_ioc_t* __ptr64)p_request_mad->context2;
- cl_spinlock_acquire( &p_ioc->p_iou->lock );
- cl_qmap_remove_item( &p_ioc->p_iou->ioc_map, &p_ioc->map_item );
- cl_spinlock_release( &p_ioc->p_iou->lock );
- __put_ioc( gp_ioc_pnp, p_ioc );
- break;
-
- default:
- break;
- }
- }
-
- /* Cleanup. */
- ib_put_mad( p_request_mad );
-
- /*
- * If this is the last MAD, finish processing the IOU queries
- * in the PnP thread.
- */
- if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) )
- cl_async_proc_queue( gp_async_pnp_mgr, &p_results->async_item );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Prune the sweep results according to the current sweep state:
- * after IOU Info, drop IOUs that never reported any controllers; after
- * service entries, first drop IOCs with incomplete entry sets, then
- * (falling through) drop IOUs left with no IOCs.
- */
-static void
-__flush_duds(
- IN OUT ioc_sweep_results_t *p_results )
-{
- cl_fmap_item_t *p_item;
- cl_map_item_t *p_ioc_item;
- iou_node_t *p_iou;
- iou_ioc_t *p_ioc;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Walk the map of IOUs and discard any that didn't respond to IOU info. */
- p_item = cl_fmap_head( &p_results->iou_map );
- /*
- * No locking required since we're protected by the serialization of the
- * PnP thread.
- */
- while( p_item != cl_fmap_end( &p_results->iou_map ) )
- {
- p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );
-
- /* Advance before possibly removing the current IOU.  A 'continue'
-  * below keeps the IOU; falling out of the switch discards it. */
- p_item = cl_fmap_next( p_item );
- switch( p_results->state )
- {
- case SWEEP_IOU_INFO:
- if( p_iou->info.max_controllers )
- continue;
- break;
-
- case SWEEP_SVC_ENTRIES:
- CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) );
- p_ioc_item = cl_qmap_head( &p_iou->ioc_map );
- while( p_ioc_item != cl_qmap_end( &p_iou->ioc_map ) )
- {
- p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
- p_ioc_item = cl_qmap_next( p_ioc_item );
-
- /* An IOC must have reported every service entry it
-  * advertised in its profile to be kept. */
- if( !p_ioc->num_valid_entries ||
- p_ioc->num_valid_entries != p_ioc->profile.num_svc_entries )
- {
- cl_qmap_remove_item( &p_iou->ioc_map, &p_ioc->map_item );
- __put_ioc( gp_ioc_pnp, p_ioc );
- }
- }
- /* Fall through. */
- case SWEEP_IOC_PROFILE:
- if( cl_qmap_count( &p_iou->ioc_map ) )
- continue;
- break;
-
- default:
- CL_ASSERT( p_results->state != SWEEP_COMPLETE );
- break;
- }
-
- cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
- __put_iou( gp_ioc_pnp, p_iou );
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-/*
- * Async (PnP thread) state machine driving one port's sweep:
- * IOU_INFO -> IOC_PROFILE -> SVC_ENTRIES -> COMPLETE.  Each step prunes
- * dead entries, then issues the next round of DM queries.  The results
- * structure is freed when the sweep completes or fails; the manager's
- * query count is released at the same time.
- */
-static void
-__process_sweep(
- IN cl_async_proc_item_t *p_async_item )
-{
- ib_api_status_t status;
- ioc_sweep_results_t *p_results;
-
- AL_ENTER( AL_DBG_PNP );
-
- p_results = PARENT_STRUCT( p_async_item, ioc_sweep_results_t, async_item );
- CL_ASSERT( !p_results->p_svc->query_cnt );
-
- if( p_results->p_svc->obj.state == CL_DESTROYING )
- {
- /* Service is going away - return everything and bail out. */
- __put_iou_map( gp_ioc_pnp, &p_results->iou_map );
- goto err;
- }
-
- /* Walk the map of IOUs and discard any that didn't respond to IOU info. */
- __flush_duds( p_results );
- switch( p_results->state )
- {
- case SWEEP_IOU_INFO:
- /* Next step, query IOC profiles for all IOUs. */
- p_results->state = SWEEP_IOC_PROFILE;
- status = __query_ioc_profiles( p_results );
- break;
-
- case SWEEP_IOC_PROFILE:
- /* Next step: query service entries for all IOCs. */
- p_results->state = SWEEP_SVC_ENTRIES;
- status = __query_svc_entries( p_results );
- break;
-
- case SWEEP_SVC_ENTRIES:
- /* Filter results and report changes. */
- p_results->state = SWEEP_COMPLETE;
- __update_results( p_results );
- status = IB_SUCCESS;
- break;
-
- default:
- CL_ASSERT( p_results->state == SWEEP_IOU_INFO ||
- p_results->state == SWEEP_IOC_PROFILE ||
- p_results->state == SWEEP_SVC_ENTRIES );
- status = IB_ERROR;
- }
-
- /* A non-success status (including IB_NOT_DONE from the query helpers)
-  * means no further callbacks will fire - finish the sweep here. */
- if( p_results->state == SWEEP_COMPLETE || status != IB_SUCCESS )
- {
-err:
- if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
- cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
- cl_free( p_results );
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Send an IOControllerProfile query for every installed slot of every
- * IOU in the sweep results, batching all MADs into one send list.
- * Returns IB_NOT_DONE when there was nothing to send.
- */
-static ib_api_status_t
-__query_ioc_profiles(
- IN ioc_sweep_results_t* const p_results )
-{
- ib_api_status_t status;
- cl_fmap_item_t *p_item;
- iou_node_t *p_iou;
- uint8_t slot;
- ib_mad_element_t *p_mad, *p_mad_list = NULL;
-
- AL_ENTER( AL_DBG_PNP );
-
- p_item = cl_fmap_head( &p_results->iou_map );
- while( p_item != cl_fmap_end( &p_results->iou_map ) )
- {
- p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );
- CL_ASSERT( p_iou->info.max_controllers );
- CL_ASSERT( cl_fmap_count( &p_iou->path_map ) );
- CL_ASSERT( p_iou->p_config_path );
- /* Advance before possibly removing the current IOU. */
- p_item = cl_fmap_next( p_item );
-
- /* Slots are numbered from 1 in the IOU info. */
- p_mad = NULL;
- for( slot = 1; slot <= p_iou->info.max_controllers; slot++ )
- {
- if( ioc_at_slot( &p_iou->info, slot ) == IOC_INSTALLED )
- {
- status = ib_get_mad( p_results->p_svc->pool_key,
- MAD_BLOCK_SIZE, &p_mad );
- if( status != IB_SUCCESS )
- break;
-
- /* The slot number rides in the attribute modifier. */
- __format_dm_get( p_results, p_iou, p_iou->p_config_path,
- IB_MAD_ATTR_IO_CONTROLLER_PROFILE, cl_hton32( slot ), p_mad );
-
- /* Chain the MAD up. */
- p_mad->p_next = p_mad_list;
- p_mad_list = p_mad;
-
- cl_atomic_inc( &p_results->p_svc->query_cnt );
- }
- }
- /* NOTE(review): p_mad is only NULL here if no slot was queried -
-  * either no IOCs installed or the very first ib_get_mad failed. */
- if( !p_mad )
- {
- /* No IOCs installed in this IOU, or failed to get MAD. */
- cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
- __put_iou( gp_ioc_pnp, p_iou );
- }
- }
-
- /* Trap the case where there are no queries to send. */
- if( !p_mad_list )
- {
- AL_EXIT( AL_DBG_PNP );
- return IB_NOT_DONE;
- }
-
- status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
- if( status != IB_SUCCESS )
- {
- /* If some of the MADs were sent wait for their completion. */
- if( p_mad_list != p_mad )
- status = IB_SUCCESS;
-
- /* Return the unsent MADs and drop their query counts. */
- while( p_mad )
- {
- p_mad_list = p_mad->p_next;
- p_mad->p_next = NULL;
- ib_put_mad( p_mad );
- if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
- status == IB_SUCCESS )
- {
- cl_async_proc_queue( gp_async_pnp_mgr,
- &p_results->async_item );
- }
- p_mad = p_mad_list;
- }
- }
- AL_EXIT( AL_DBG_PNP );
- return status;
-}
-\r
-\r
-/*
- * Send ServiceEntries queries for every IOC of every IOU, four entries
- * per MAD.  The attribute modifier encodes (slot << 16) | (hi << 8) | lo
- * per the DM class definition.  Returns IB_NOT_DONE when there was
- * nothing to send.
- *
- * Fix: the hi-index clamp used '(i + 3) > num_svc_entries', which lets
- * i + 3 == num_svc_entries through and requests entry index
- * num_svc_entries - one past the last valid index (entries are numbered
- * 0 .. num_svc_entries-1).  Clamp with '>=' instead.
- */
-static ib_api_status_t
-__query_svc_entries(
- IN ioc_sweep_results_t* const p_results )
-{
- ib_api_status_t status;
- cl_fmap_item_t *p_iou_item;
- cl_map_item_t *p_ioc_item;
- iou_node_t *p_iou;
- iou_ioc_t *p_ioc;
- uint8_t i;
- uint32_t attr_mod;
- ib_mad_element_t *p_mad, *p_mad_list = NULL;
-
- AL_ENTER( AL_DBG_PNP );
-
- for( p_iou_item = cl_fmap_head( &p_results->iou_map );
- p_iou_item != cl_fmap_end( &p_results->iou_map );
- p_iou_item = cl_fmap_next( p_iou_item ) )
- {
- p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
- CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) );
- CL_ASSERT( cl_fmap_count( &p_iou->path_map ) );
- CL_ASSERT( p_iou->p_config_path );
-
- for( p_ioc_item = cl_qmap_head( &p_iou->ioc_map );
- p_ioc_item != cl_qmap_end( &p_iou->ioc_map );
- p_ioc_item = cl_qmap_next( p_ioc_item ) )
- {
- p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
- CL_ASSERT( p_ioc->p_iou == p_iou );
-
- /* Fetch entries in batches of four: lo = i, hi = min(i+3, last). */
- for( i = 0; i < p_ioc->profile.num_svc_entries; i += 4 )
- {
- status = ib_get_mad( p_results->p_svc->pool_key,
- MAD_BLOCK_SIZE, &p_mad );
- if( status != IB_SUCCESS )
- break;
-
- attr_mod = (((uint32_t)p_ioc->slot) << 16) | i;
- /* Clamp hi to the last valid entry index (num_svc_entries-1);
-  * note >=, since i+3 == num_svc_entries is already past it. */
- if( (i + 3) >= p_ioc->profile.num_svc_entries )
- attr_mod |= ((p_ioc->profile.num_svc_entries - 1) << 8);
- else
- attr_mod |= ((i + 3) << 8);
-
- __format_dm_get( p_results, p_ioc, p_iou->p_config_path,
- IB_MAD_ATTR_SERVICE_ENTRIES, cl_hton32( attr_mod ),
- p_mad );
-
- /* Chain the MAD up. */
- p_mad->p_next = p_mad_list;
- p_mad_list = p_mad;
-
- /* ref_cnt is released in __svc_entry_resp. */
- cl_atomic_inc( &p_ioc->ref_cnt );
- cl_atomic_inc( &p_results->p_svc->query_cnt );
- }
- }
- }
-
- /* Trap the case where there are no queries to send. */
- if( !p_mad_list )
- {
- AL_EXIT( AL_DBG_PNP );
- return IB_NOT_DONE;
- }
-
- status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
- if( status != IB_SUCCESS )
- {
- /* If some of the MADs were sent wait for their completion. */
- if( p_mad_list != p_mad )
- status = IB_SUCCESS;
-
- /* Return the unsent MADs, undoing both reference counts. */
- while( p_mad )
- {
- p_mad_list = p_mad->p_next;
- p_mad->p_next = NULL;
- p_ioc = (iou_ioc_t* __ptr64)p_mad->context2;
- cl_atomic_dec( &p_ioc->ref_cnt );
- ib_put_mad( p_mad );
- if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
- status == IB_SUCCESS )
- {
- cl_async_proc_queue( gp_async_pnp_mgr,
- &p_results->async_item );
- }
- p_mad = p_mad_list;
- }
- }
- AL_EXIT( AL_DBG_PNP );
- return status;
-}
-\r
-\r
-/*
- * Fold the finished sweep results into the manager's sweep_map.  After
- * cl_fmap_delta the two maps hold the same IOU set; duplicates have
- * their IOC and path maps merged into the existing entry, then the new
- * copy is returned to the pool.  Unique items are merged back in at the
- * end, leaving sweep_map as the union and iou_map empty.
- */
-static void
-__update_results(
- IN ioc_sweep_results_t* const p_results )
-{
- cl_fmap_t iou_map1, iou_map2;
- cl_fmap_item_t *p_item1, *p_item2;
- iou_node_t *p_iou1, *p_iou2;
-
- AL_ENTER( AL_DBG_PNP );
-
- cl_fmap_init( &iou_map1, __iou_cmp );
- cl_fmap_init( &iou_map2, __iou_cmp );
-
- /*
- * No need to lock on the sweep map since all accesses are serialized
- * by the PnP thread.
- */
- cl_fmap_delta( &gp_ioc_pnp->sweep_map, &p_results->iou_map,
- &iou_map1, &iou_map2 );
- /* sweep_map and iou_map now contain exactly the same items. */
- p_item1 = cl_fmap_head( &gp_ioc_pnp->sweep_map );
- p_item2 = cl_fmap_head( &p_results->iou_map );
- while( p_item1 != cl_fmap_end( &gp_ioc_pnp->sweep_map ) )
- {
- CL_ASSERT( p_item2 != cl_fmap_end( &p_results->iou_map ) );
- p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item );
- p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item );
- CL_ASSERT( p_iou1->guid == p_iou2->guid );
-
- /*
- * Merge the IOC maps - this leaves all duplicates in
- * p_iou2->ioc_map.
- */
- cl_qmap_merge( &p_iou1->ioc_map, &p_iou2->ioc_map );
-
- /*
- * Merge the path maps - this leaves all duplicates in
- * p_iou2->path_map
- */
- cl_fmap_merge( &p_iou1->path_map, &p_iou2->path_map );
-
- /* Return the duplicate IOU (and whatever duplicate paths and IOCs) */
- cl_fmap_remove_item( &p_results->iou_map, p_item2 );
- __put_iou( gp_ioc_pnp, p_iou2 );
-
- p_item1 = cl_fmap_next( p_item1 );
- p_item2 = cl_fmap_head( &p_results->iou_map );
- }
- CL_ASSERT( !cl_fmap_count( &p_results->iou_map ) );
-
- /* Merge in the unique items. */
- cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map1 );
- CL_ASSERT( !cl_fmap_count( &iou_map1 ) );
- cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map2 );
- CL_ASSERT( !cl_fmap_count( &iou_map2 ) );
-
- AL_EXIT( AL_DBG_PNP );
- return;
-}
-\r
-\r
-/*
- * PnP-thread callback run when all port sweeps for a pass have finished.
- * Diffs the newly built sweep_map against the previously reported
- * iou_map, reports IOU/IOC/path changes to registrants, then re-arms
- * the sweep timer and drops the reference taken when the pass started.
- */
-static void
-__ioc_async_cb(
- IN cl_async_proc_item_t *p_item )
-{
- cl_status_t status;
- cl_fmap_t old_ious, new_ious;
-
- AL_ENTER( AL_DBG_PNP );
-
- CL_ASSERT( p_item == &gp_ioc_pnp->async_item );
- UNUSED_PARAM( p_item );
-
- CL_ASSERT( !gp_ioc_pnp->query_cnt );
-
- /* After the delta, both input maps hold only the IOUs common to
-  * both passes; new_ious/old_ious hold the additions/removals. */
- cl_fmap_init( &old_ious, __iou_cmp );
- cl_fmap_init( &new_ious, __iou_cmp );
- cl_fmap_delta(
- &gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map, &new_ious, &old_ious );
-
- /* For each duplicate IOU, report changes in IOCs or paths. */
- __change_ious( &gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map );
-
- /* Report all new IOUs. */
- __add_ious( &gp_ioc_pnp->iou_map, &new_ious, NULL );
- CL_ASSERT( !cl_fmap_count( &new_ious ) );
-
- /* Report all removed IOUs. */
- __remove_ious( &old_ious );
- CL_ASSERT( !cl_fmap_count( &old_ious ) );
-
- /* Reset the sweep timer. */
- if( g_ioc_poll_interval )
- {
- status = cl_timer_start(
- &gp_ioc_pnp->sweep_timer, g_ioc_poll_interval );
- CL_ASSERT( status == CL_SUCCESS );
- }
-
- /* Release the reference we took in the timer callback. */
- deref_al_obj( &gp_ioc_pnp->obj );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * For every IOU present in both the previous (p_cur_ious) and the new
- * (p_dup_ious) pass, diff its path and IOC maps and report the changes.
- * The two maps are expected to hold identical IOU sets in matching
- * order; each duplicate IOU from the new pass is consumed and returned
- * to the pool.
- */
-static void
-__change_ious(
- IN cl_fmap_t* const p_cur_ious,
- IN cl_fmap_t* const p_dup_ious )
-{
- cl_fmap_t new_paths, old_paths;
- cl_qmap_t new_iocs, old_iocs;
- cl_fmap_item_t *p_item1, *p_item2;
- iou_node_t *p_iou1, *p_iou2;
-
- AL_ENTER( AL_DBG_PNP );
-
- cl_fmap_init( &new_paths, __path_cmp );
- cl_fmap_init( &old_paths, __path_cmp );
- cl_qmap_init( &new_iocs );
- cl_qmap_init( &old_iocs );
-
- p_item1 = cl_fmap_head( p_cur_ious );
- p_item2 = cl_fmap_head( p_dup_ious );
- while( p_item1 != cl_fmap_end( p_cur_ious ) )
- {
- p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item );
- p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item );
- CL_ASSERT( p_iou1->guid == p_iou2->guid );
-
- /* Figure out what changed. */
- cl_fmap_delta(
- &p_iou1->path_map, &p_iou2->path_map, &new_paths, &old_paths );
- cl_qmap_delta(
- &p_iou1->ioc_map, &p_iou2->ioc_map, &new_iocs, &old_iocs );
-
- /*
- * Report path changes before IOC changes so that new IOCs
- * report up-to-date paths. Report new paths before removing
- * old ones to minimize the chance of disruption of service - 
- * i.e. the last path being removed before an alternate is available.
- */
- __add_paths( p_iou1, &p_iou1->ioc_map, &new_paths, NULL );
- CL_ASSERT( !cl_fmap_count( &new_paths ) );
-
- __remove_paths( &p_iou1->ioc_map, &old_paths );
- CL_ASSERT( !cl_fmap_count( &old_paths ) );
-
- /* Report IOCs. */
- __add_iocs( p_iou1, &new_iocs, NULL );
- CL_ASSERT( !cl_qmap_count( &new_iocs ) );
-
- __remove_iocs( p_iou1, &old_iocs );
- CL_ASSERT( !cl_qmap_count( &old_iocs ) );
-
- /* Done with the duplicate IOU. Return it to the pool */
- cl_fmap_remove_item( p_dup_ious, p_item2 );
- __put_iou( gp_ioc_pnp, p_iou2 );
-
- p_item1 = cl_fmap_next( p_item1 );
- p_item2 = cl_fmap_head( p_dup_ious );
- }
- CL_ASSERT( !cl_fmap_count( p_dup_ious ) );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Report an add event for every IOU in p_new_ious (optionally to a single
- * registrant), then fold the new IOUs into p_cur_ious unless the caller
- * passed the same map for both.
- */
-static void
-__add_ious(
- IN cl_fmap_t* const p_cur_ious,
- IN cl_fmap_t* const p_new_ious,
- IN al_pnp_t* const p_reg OPTIONAL )
-{
- cl_fmap_item_t *p_item;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Report the addition of each IOU. */
- for( p_item = cl_fmap_head( p_new_ious );
- p_item != cl_fmap_end( p_new_ious );
- p_item = cl_fmap_next( p_item ) )
- {
- __report_iou_add(
- PARENT_STRUCT( p_item, iou_node_t, map_item ), p_reg );
- }
-
- /* Merge the reported IOUs into the current map. */
- if( p_cur_ious != p_new_ious )
- {
- cl_fmap_merge( p_cur_ious, p_new_ious );
- CL_ASSERT( !cl_fmap_count( p_new_ious ) );
- }
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Report a removal event for every IOU in p_old_ious, draining the map
- * and returning each IOU to the pool.
- */
-static void
-__remove_ious(
- IN cl_fmap_t* const p_old_ious )
-{
- cl_fmap_item_t *p_item;
- iou_node_t *p_iou;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Pop IOUs from the head until the map is empty. */
- while( (p_item = cl_fmap_head( p_old_ious )) !=
- cl_fmap_end( p_old_ious ) )
- {
- p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );
-
- /* Report the IOU removal. */
- __report_iou_remove( p_iou );
-
- cl_fmap_remove_item( p_old_ious, p_item );
- __put_iou( gp_ioc_pnp, p_iou );
- }
- CL_ASSERT( !cl_fmap_count( p_old_ious ) );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Report an add event for every IOC in p_new_iocs (optionally to a single
- * registrant), then fold the new IOCs into the IOU's map unless the
- * caller passed the IOU's own map.
- */
-static void
-__add_iocs(
- IN iou_node_t* const p_iou,
- IN cl_qmap_t* const p_new_iocs,
- IN al_pnp_t* const p_reg OPTIONAL )
-{
- cl_map_item_t *p_item;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Report the addition of each IOC. */
- for( p_item = cl_qmap_head( p_new_iocs );
- p_item != cl_qmap_end( p_new_iocs );
- p_item = cl_qmap_next( p_item ) )
- {
- __report_ioc_add( p_iou,
- PARENT_STRUCT( p_item, iou_ioc_t, map_item ), p_reg );
- }
-
- /* Merge the reported IOCs into the IOU's map. */
- if( p_new_iocs != &p_iou->ioc_map )
- {
- cl_qmap_merge( &p_iou->ioc_map, p_new_iocs );
- CL_ASSERT( !cl_qmap_count( p_new_iocs ) );
- }
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Report a removal event for every IOC in p_old_iocs, draining the map
- * from the tail and returning each IOC to the pool.
- */
-static void
-__remove_iocs(
- IN iou_node_t* const p_iou,
- IN cl_qmap_t* const p_old_iocs )
-{
- cl_map_item_t *p_item;
- iou_ioc_t *p_ioc;
-
- AL_ENTER( AL_DBG_PNP );
-
- /* Pop IOCs from the tail until the map is empty. */
- while( (p_item = cl_qmap_tail( p_old_iocs )) !=
- cl_qmap_end( p_old_iocs ) )
- {
- p_ioc = PARENT_STRUCT( p_item, iou_ioc_t, map_item );
-
- /* Report the IOC removal. */
- __report_ioc_remove( p_iou, p_ioc );
-
- cl_qmap_remove_item( p_old_iocs, p_item );
- __put_ioc( gp_ioc_pnp, p_ioc );
- }
- CL_ASSERT( !cl_qmap_count( p_old_iocs ) );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*
- * Report each path in p_new_paths as added for every IOC in p_ioc_map
- * (optionally to a single registrant), then fold the new paths into the
- * IOU's path map.
- *
- * Fix: use CL_ASSERT rather than bare ASSERT for consistency with the
- * rest of the file (CL_ASSERT is the complib assertion used everywhere
- * else in this module).
- */
-static void
-__add_paths(
- IN iou_node_t* const p_iou,
- IN cl_qmap_t* const p_ioc_map,
- IN cl_fmap_t* const p_new_paths,
- IN al_pnp_t* const p_reg OPTIONAL )
-{
- cl_map_item_t *p_ioc_item;
- cl_fmap_item_t *p_item;
- iou_ioc_t *p_ioc;
- iou_path_t *p_path;
-
- AL_ENTER( AL_DBG_PNP );
-
- p_item = cl_fmap_head( p_new_paths );
- while( p_item != cl_fmap_end( p_new_paths ) )
- {
- p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );
-
- /* Report the path to all IOCs. */
- for( p_ioc_item = cl_qmap_head( p_ioc_map );
- p_ioc_item != cl_qmap_end( p_ioc_map );
- p_ioc_item = cl_qmap_next( p_ioc_item ) )
- {
- p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
- __report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg );
- }
-
- p_item = cl_fmap_next( p_item );
- }
-
- /* The caller must not pass the IOU's own path map as the new map. */
- CL_ASSERT( &p_iou->path_map != p_new_paths );
-
- cl_fmap_merge( &p_iou->path_map, p_new_paths );
- CL_ASSERT( !cl_fmap_count( p_new_paths ) );
-
- AL_EXIT( AL_DBG_PNP );
-}
-\r
-\r
-/*\r
- * Report every path in p_new_paths as added for a single IOC.  Used when\r
- * a new IOC appears so it learns about paths the IOU already had; the\r
- * paths are NOT merged here (the caller owns the map).\r
- */\r
-static void\r
-__add_ioc_paths(\r
-	IN				iou_ioc_t*	const		p_ioc,\r
-	IN				cl_fmap_t*	const		p_new_paths,\r
-	IN				al_pnp_t*	const		p_reg OPTIONAL )\r
-{\r
-	cl_fmap_item_t	*p_item;\r
-	iou_path_t		*p_path;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	p_item = cl_fmap_head( p_new_paths );\r
-	while( p_item != cl_fmap_end( p_new_paths ) )\r
-	{\r
-		p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );\r
-\r
-		__report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg );\r
-\r
-		p_item = cl_fmap_next( p_item );\r
-	}\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Report removal of every path in p_old_paths to every IOC, then free the\r
- * paths.  Both maps are walked tail-to-head so removal notifications are\r
- * delivered in reverse of the addition order.\r
- */\r
-static void\r
-__remove_paths(\r
-	IN				cl_qmap_t*	const		p_ioc_map,\r
-	IN				cl_fmap_t*	const		p_old_paths )\r
-{\r
-	cl_map_item_t	*p_ioc_item;\r
-	cl_fmap_item_t	*p_item;\r
-	iou_ioc_t		*p_ioc;\r
-	iou_path_t		*p_path;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	p_item = cl_fmap_tail( p_old_paths );\r
-	while( p_item != cl_fmap_end( p_old_paths ) )\r
-	{\r
-		p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );\r
-\r
-		for( p_ioc_item = cl_qmap_tail( p_ioc_map );\r
-			p_ioc_item != cl_qmap_end( p_ioc_map );\r
-			p_ioc_item = cl_qmap_prev( p_ioc_item ) )\r
-		{\r
-			p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );\r
-			__report_path( p_ioc, p_path, IB_PNP_IOC_PATH_REMOVE, NULL );\r
-		}\r
-\r
-		/* Removing invalidates the cursor; restart from the new tail. */\r
-		cl_fmap_remove_item( p_old_paths, p_item );\r
-		__put_path( gp_ioc_pnp, p_path );\r
-		p_item = cl_fmap_tail( p_old_paths );\r
-	}\r
-	CL_ASSERT( !cl_fmap_count( p_old_paths ) );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Deliver one PnP event to one registrant.  Used as the callback for\r
- * cl_qlist_find_from_head/tail: it always returns a non-CL_SUCCESS code\r
- * so the "find" visits every registrant in the list (a for-each idiom).\r
- * Copies the event record into the caller-provided user record, resolves\r
- * or creates the per-GUID registration context, invokes the registrant's\r
- * callback, and updates/frees the context based on the result.\r
- */\r
-static cl_status_t\r
-__notify_users(\r
-	IN		const	cl_list_item_t* const	p_item,\r
-	IN				al_pnp_ioc_event_t* const	p_event )\r
-{\r
-	ib_api_status_t		status;\r
-	al_pnp_t			*p_reg;\r
-	al_pnp_context_t	*p_context;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	p_reg = PARENT_STRUCT( p_item, al_pnp_t, list_item );\r
-\r
-	/* Copy the source record into the user's record. */\r
-	cl_memcpy( p_event->p_user_rec, p_event->p_rec, p_event->rec_size );\r
-	p_event->p_user_rec->h_pnp = p_reg;\r
-	p_event->p_user_rec->pnp_context = (void*)p_reg->obj.context;\r
-\r
-	/* ADD events create a fresh context; all others look up the existing one. */\r
-	switch( p_event->p_rec->pnp_event )\r
-	{\r
-	case IB_PNP_IOU_ADD:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU );\r
-		p_context = pnp_create_context( p_reg, &p_event->p_rec->guid);\r
-		break;\r
-\r
-	case IB_PNP_IOU_REMOVE:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU );\r
-		/* Lookup the context for this IOU. */\r
-		p_context = pnp_get_context( p_reg, &p_event->p_rec->guid );\r
-		break;\r
-\r
-	case IB_PNP_IOC_ADD:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );\r
-		p_context = pnp_create_context( p_reg, &p_event->p_rec->guid);\r
-		break;\r
-	case IB_PNP_IOC_REMOVE:\r
-	case IB_PNP_IOC_PATH_ADD:\r
-	case IB_PNP_IOC_PATH_REMOVE:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );\r
-		p_context = pnp_get_context( p_reg, &p_event->p_rec->guid );\r
-		break;\r
-	default:\r
-		AL_PRINT_EXIT(TRACE_LEVEL_WARNING, AL_DBG_PNP,("Invalid PnP event %#x\n",\r
-			p_event->p_rec->pnp_event));\r
-		return CL_NOT_DONE;\r
-		break;\r
-	}\r
-	/* No context: registrant never saw the ADD (or rejected it) - skip. */\r
-	if( !p_context )\r
-		return CL_NOT_FOUND;\r
-\r
-	p_event->p_user_rec->context = (void*)p_context->context;\r
-\r
-	/* Notify user. */\r
-	status = p_reg->pfn_pnp_cb( p_event->p_user_rec );\r
-\r
-	/* Update contexts */\r
-	if( status != IB_SUCCESS ||\r
-		p_event->p_rec->pnp_event == IB_PNP_IOU_REMOVE ||\r
-		p_event->p_rec->pnp_event == IB_PNP_IOC_REMOVE )\r
-	{\r
-		cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item );\r
-		cl_free( p_context );\r
-	}\r
-	else\r
-	{\r
-		p_context->context = p_event->p_user_rec->context;\r
-	}\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-	/* Deliberately non-CL_SUCCESS so the enclosing list "find" continues. */\r
-	return CL_NOT_FOUND;\r
-}\r
-\r
-\r
-/*\r
- * Report an IOU addition.  Allocates a double-size buffer: the first half\r
- * is the master record, the second half is the scratch copy handed to each\r
- * registrant by __notify_users.  With p_reg set, only that registrant is\r
- * notified (IOU class) or only its IOCs are reported (IOC class);\r
- * otherwise all IOU registrants are notified, then all IOCs reported.\r
- */\r
-static void\r
-__report_iou_add(\r
-	IN				iou_node_t*	const		p_iou,\r
-	IN				al_pnp_t*	const		p_reg OPTIONAL )\r
-{\r
-	al_pnp_ioc_event_t		event;\r
-	ib_pnp_iou_rec_t		*p_rec, *p_user_rec;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	event.rec_size = sizeof(ib_pnp_iou_rec_t);\r
-	event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) );\r
-\r
-	p_rec = cl_zalloc( event.rec_size * 2 );\r
-	if( !p_rec )\r
-	{\r
-		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
-			("Failed to allocate user record.\n") );\r
-		return;\r
-	}\r
-	p_rec->pnp_rec.pnp_event = IB_PNP_IOU_ADD;\r
-	p_rec->pnp_rec.guid = p_iou->guid;\r
-	p_rec->pnp_rec.ca_guid = p_iou->ca_guid;\r
-	\r
-	p_rec->ca_guid = p_iou->ca_guid;\r
-	p_rec->guid = p_iou->guid;\r
-	p_rec->chassis_guid = p_iou->chassis_guid;\r
-	p_rec->vend_id = p_iou->vend_id;\r
-	p_rec->dev_id = p_iou->dev_id;\r
-	p_rec->revision = p_iou->revision;\r
-	cl_memcpy( p_rec->desc, p_iou->desc, sizeof(p_rec->desc) );\r
-	p_user_rec = (ib_pnp_iou_rec_t*)(((uint8_t*)p_rec) + event.rec_size);\r
-	\r
-	event.p_rec = (ib_pnp_rec_t*)p_rec;\r
-	event.p_user_rec = (ib_pnp_rec_t*)p_user_rec;\r
-\r
-	if( p_reg )\r
-	{\r
-		if( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU )\r
-			__notify_users( &p_reg->list_item, &event );\r
-		else\r
-			__add_iocs( p_iou, &p_iou->ioc_map, p_reg );\r
-	}\r
-	else\r
-	{\r
-		/* Report the IOU to all clients registered for IOU events. */\r
-		cl_qlist_find_from_head( &gp_ioc_pnp->iou_reg_list,\r
-			__notify_users, &event );\r
-\r
-		/* Report IOCs - this will in turn report the paths. */\r
-		__add_iocs( p_iou, &p_iou->ioc_map, NULL );\r
-	}\r
-\r
-	cl_free( p_rec );\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Report an IOU removal: first remove (and report removal of) all of its\r
- * IOCs, then notify IOU registrants tail-first so REMOVE notifications\r
- * arrive in reverse ADD order.  Records live on the stack - removal\r
- * reporting must not fail for lack of memory.\r
- */\r
-static void\r
-__report_iou_remove(\r
-	IN				iou_node_t*	const		p_iou )\r
-{\r
-	al_pnp_ioc_event_t		event;\r
-	ib_pnp_iou_rec_t		rec, user_rec;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	/* Report IOCs - this will in turn report the paths. */\r
-	__remove_iocs( p_iou, &p_iou->ioc_map );\r
-\r
-	cl_memclr( &rec, sizeof(ib_pnp_iou_rec_t) );\r
-	rec.pnp_rec.pnp_event = IB_PNP_IOU_REMOVE;\r
-	rec.pnp_rec.guid = p_iou->guid;\r
-	rec.pnp_rec.ca_guid = p_iou->ca_guid;\r
-\r
-	event.rec_size = sizeof(ib_pnp_iou_rec_t);\r
-	event.p_rec = (ib_pnp_rec_t*)&rec;\r
-	event.p_user_rec = (ib_pnp_rec_t*)&user_rec;\r
-\r
-	/*\r
-	 * Report the IOU to all clients registered for IOU events in\r
-	 * reverse order than ADD notifications.\r
-	 */\r
-	cl_qlist_find_from_tail( &gp_ioc_pnp->iou_reg_list,\r
-		__notify_users, &event );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Report an IOC addition, then report all existing IOU paths for it.\r
- * The record is variable-length (num_svc_entries service entries follow\r
- * the fixed part); as in __report_iou_add, a double-size buffer holds the\r
- * master record plus the per-registrant scratch copy.  Allocation failure\r
- * is silently dropped - the IOC simply goes unreported.\r
- */\r
-static void\r
-__report_ioc_add(\r
-	IN				iou_node_t*	const		p_iou,\r
-	IN				iou_ioc_t*	const		p_ioc,\r
-	IN				al_pnp_t*	const		p_reg OPTIONAL )\r
-{\r
-	al_pnp_ioc_event_t		event;\r
-	ib_pnp_ioc_rec_t		*p_rec;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	event.rec_size = sizeof(ib_pnp_ioc_rec_t) +\r
-		(sizeof(ib_svc_entry_t) * (p_ioc->profile.num_svc_entries - 1));\r
-	event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) );\r
-\r
-	/*\r
-	 * The layout of the pnp record is as follows:\r
-	 *	ib_pnp_rec_t\r
-	 *	ib_svc_entry_t\r
-	 *	ib_ioc_info_t\r
-	 *\r
-	 * This is needed to keep the service entries contiguous to the first\r
-	 * entry in the pnp record.\r
-	 */\r
-	p_rec = (ib_pnp_ioc_rec_t*)cl_zalloc( event.rec_size * 2 );\r
-	if( !p_rec )\r
-		return;\r
-\r
-	p_rec->pnp_rec.pnp_event = IB_PNP_IOC_ADD;\r
-	p_rec->pnp_rec.guid = p_ioc->profile.ioc_guid;\r
-	p_rec->pnp_rec.ca_guid = p_ioc->p_iou->ca_guid;\r
-	\r
-	p_rec->ca_guid = p_ioc->p_iou->ca_guid;\r
-	cl_memcpy( p_rec->svc_entry_array, p_ioc->p_svc_entries,\r
-		p_ioc->profile.num_svc_entries * sizeof(ib_svc_entry_t) );\r
-	p_rec->info.chassis_guid = p_iou->chassis_guid;\r
-	p_rec->info.chassis_slot = p_iou->slot;\r
-	p_rec->info.iou_guid = p_iou->guid;\r
-	p_rec->info.iou_slot = p_ioc->slot;\r
-	p_rec->info.profile = p_ioc->profile;\r
-\r
-	event.p_rec = (ib_pnp_rec_t*)p_rec;\r
-	event.p_user_rec = (ib_pnp_rec_t*)(((uint8_t*)p_rec) + event.rec_size);\r
-\r
-	if( p_reg )\r
-	{\r
-		__notify_users( &p_reg->list_item, &event );\r
-	}\r
-	else\r
-	{\r
-		/* Report the IOC to all clients registered for IOC events. */\r
-		cl_qlist_find_from_head( &gp_ioc_pnp->ioc_reg_list,\r
-			__notify_users, &event );\r
-	}\r
-	cl_free( p_rec );\r
-\r
-	/* Report the paths for this IOC only. */\r
-	__add_ioc_paths( p_ioc, &p_iou->path_map, p_reg );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Report an IOC removal to all IOC registrants, tail-first so REMOVE\r
- * notifications arrive in reverse ADD order.  Stack records - cannot\r
- * fail on allocation.  p_iou is unused (kept for signature symmetry\r
- * with __report_ioc_add).\r
- */\r
-static void\r
-__report_ioc_remove(\r
-	IN				iou_node_t*	const		p_iou,\r
-	IN				iou_ioc_t*	const		p_ioc )\r
-{\r
-	al_pnp_ioc_event_t		event;\r
-	ib_pnp_ioc_rec_t		rec, user_rec;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	UNUSED_PARAM( p_iou );\r
-\r
-	cl_memclr( &rec, sizeof(ib_pnp_ioc_rec_t) );\r
-	rec.pnp_rec.pnp_event = IB_PNP_IOC_REMOVE;\r
-	rec.pnp_rec.guid = p_ioc->profile.ioc_guid;\r
-	rec.pnp_rec.ca_guid = p_ioc->p_iou->ca_guid;\r
-	\r
-	event.rec_size = sizeof(ib_pnp_ioc_rec_t);\r
-	event.p_rec = (ib_pnp_rec_t*)&rec;\r
-	event.p_user_rec = (ib_pnp_rec_t*)&user_rec;\r
-\r
-	/*\r
-	 * Report the IOC removal to all clients registered for IOC events in\r
-	 * reverse order than ADD notifications.\r
-	 */\r
-	cl_qlist_find_from_tail( &gp_ioc_pnp->ioc_reg_list,\r
-		__notify_users, &event );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Report a single path event (ADD or REMOVE) for one IOC.  With p_reg set,\r
- * only that registrant is notified; otherwise all IOC registrants are,\r
- * head-first for ADD and tail-first for REMOVE (reverse ADD order).\r
- * Allocation failure silently drops the notification.\r
- */\r
-static void\r
-__report_path(\r
-	IN				iou_ioc_t*	const		p_ioc,\r
-	IN				iou_path_t*	const		p_path,\r
-	IN				ib_pnp_event_t				pnp_event,\r
-	IN				al_pnp_t*	const		p_reg OPTIONAL )\r
-{\r
-	al_pnp_ioc_event_t		event;\r
-	ib_pnp_ioc_path_rec_t	*p_rec;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	CL_ASSERT( pnp_event == IB_PNP_IOC_PATH_ADD ||\r
-		pnp_event == IB_PNP_IOC_PATH_REMOVE );\r
-\r
-	event.rec_size = sizeof(ib_pnp_ioc_path_rec_t);\r
-	event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) );\r
-\r
-	/*\r
-	 * NOTE(review): this layout comment appears copy-pasted from\r
-	 * __report_ioc_add; the path record is fixed-size with no trailing\r
-	 * service entries.  The double-size allocation is for the master\r
-	 * record plus the per-registrant scratch copy, as elsewhere.\r
-	 *\r
-	 * The layout of the pnp record is as follows:\r
-	 *	ib_pnp_rec_t\r
-	 *	ib_svc_entry_t\r
-	 *	ib_ioc_info_t\r
-	 *\r
-	 * This is needed to keep the service entries contiguous to the first\r
-	 * entry in the pnp record.\r
-	 */\r
-	p_rec = (ib_pnp_ioc_path_rec_t*)cl_zalloc( event.rec_size * 2 );\r
-	if( !p_rec )\r
-		return;\r
-	p_rec->pnp_rec.pnp_event = pnp_event;\r
-	p_rec->pnp_rec.guid = p_ioc->profile.ioc_guid;\r
-	p_rec->pnp_rec.ca_guid = p_path->ca_guid;\r
-	\r
-	p_rec->ca_guid = p_path->ca_guid;\r
-	p_rec->port_guid = p_path->port_guid;\r
-	p_rec->path = p_path->rec;\r
-\r
-	event.p_rec = (ib_pnp_rec_t*)p_rec;\r
-	event.p_user_rec = (ib_pnp_rec_t*)(((uint8_t*)p_rec) + event.rec_size);\r
-\r
-	/* Report the IOC to all clients registered for IOC events. */\r
-	if( p_reg )\r
-	{\r
-		__notify_users( &p_reg->list_item, &event );\r
-	}\r
-	else\r
-	{\r
-		if( pnp_event == IB_PNP_IOC_PATH_ADD )\r
-		{\r
-			cl_qlist_find_from_head( &gp_ioc_pnp->ioc_reg_list,\r
-				__notify_users, &event );\r
-		}\r
-		else\r
-		{\r
-			cl_qlist_find_from_tail( &gp_ioc_pnp->ioc_reg_list,\r
-				__notify_users, &event );\r
-		}\r
-	}\r
-\r
-	cl_free( p_rec );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Async-processing handler for a new IOU/IOC PnP registration: queues the\r
- * registrant on the matching list, replays all currently-known IOUs/IOCs/\r
- * paths to it, signals registration completion, and drops the init_al_obj\r
- * reference.\r
- */\r
-void\r
-ioc_pnp_process_reg(\r
-	IN				cl_async_proc_item_t		*p_item )\r
-{\r
-	al_pnp_t		*p_reg;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item );\r
-\r
-	/* Add the registrant to the list. */\r
-	switch( pnp_get_class( p_reg->pnp_class ) )\r
-	{\r
-	case IB_PNP_IOU:\r
-		cl_qlist_insert_tail( &gp_ioc_pnp->iou_reg_list, &p_reg->list_item );\r
-		break;\r
-\r
-	case IB_PNP_IOC:\r
-		cl_qlist_insert_tail( &gp_ioc_pnp->ioc_reg_list, &p_reg->list_item );\r
-		break;\r
-\r
-	default:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ||\r
-			pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );\r
-	}\r
-\r
-	/* Generate all relevant events for the registration. */\r
-	/* Source and destination maps are the same: replay without merging. */\r
-	__add_ious( &gp_ioc_pnp->iou_map, &gp_ioc_pnp->iou_map, p_reg );\r
-\r
-	/* Notify the user that the registration is complete. */\r
-	pnp_reg_complete( p_reg );\r
-\r
-	/* Release the reference taken in init_al_obj. */\r
-	deref_al_obj( &p_reg->obj );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Async-processing handler for an IOU/IOC PnP deregistration: removes the\r
- * registrant from the matching list and drops the reference taken when the\r
- * deregistration was queued.\r
- */\r
-void\r
-ioc_pnp_process_dereg(\r
-	IN				cl_async_proc_item_t		*p_item )\r
-{\r
-	al_pnp_t		*p_reg;\r
-\r
-	AL_ENTER( AL_DBG_PNP );\r
-\r
-	p_reg = PARENT_STRUCT( p_item, al_pnp_t, dereg_item );\r
-\r
-	/* Remove the registration information from the list. */\r
-	switch( pnp_get_class( p_reg->pnp_class ) )\r
-	{\r
-	case IB_PNP_IOU:\r
-		cl_qlist_remove_item( &gp_ioc_pnp->iou_reg_list, &p_reg->list_item );\r
-		break;\r
-\r
-	case IB_PNP_IOC:\r
-		cl_qlist_remove_item( &gp_ioc_pnp->ioc_reg_list, &p_reg->list_item );\r
-		break;\r
-\r
-	default:\r
-		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ||\r
-			pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );\r
-		/* Fixed typo in trace message: "registartion" -> "registration". */\r
-		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid PnP registration type.\n") );\r
-	}\r
-\r
-	/* Release the reference we took for processing the deregistration. */\r
-	deref_al_obj( &p_reg->obj );\r
-\r
-	AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-\r
-\r
-\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_mad_pool.c 426 2006-07-24 19:18:19Z ftillier $\r
- */\r
-\r
-#include "al.h"\r
-#include "al_ci_ca.h"\r
-#include "al_debug.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_mad_pool.tmh"\r
-#endif\r
-\r
-#include "al_mad_pool.h"\r
-#include "al_pd.h"\r
-#include "al_verbs.h"\r
-#include "ib_common.h"\r
-\r
-\r
-/*\r
- * Lookaside-list item for MAD send tracking: pairs the al_mad_send_t with\r
- * the pool it was drawn from so put_mad_send() can return it correctly.\r
- */\r
-typedef struct _mad_send\r
-{\r
-	al_mad_send_t			mad_send;\r
-	ib_pool_handle_t		h_pool;\r
-\r
-}	mad_send_t;\r
-\r
-\r
-\r
-\r
-/*\r
- * Lookaside-list item for RMPP tracking; mirrors mad_send_t.  Its\r
- * allocator/free routines are not in this portion of the file.\r
- */\r
-typedef struct _mad_rmpp\r
-{\r
-	al_mad_rmpp_t			mad_rmpp;\r
-	ib_pool_handle_t		h_pool;\r
-\r
-}	mad_rmpp_t;\r
-\r
-\r
-\r
-/*\r
- * Function prototypes.\r
- */\r
-static void\r
-__destroying_pool(\r
- IN al_obj_t* p_obj );\r
-\r
-static void\r
-__free_pool(\r
- IN al_obj_t* p_obj );\r
-\r
-static void\r
-__destroying_pool_key(\r
- IN al_obj_t* p_obj );\r
-\r
-static void\r
-__cleanup_pool_key(\r
- IN al_obj_t* p_obj );\r
-\r
-static void\r
-__free_pool_key(\r
- IN al_obj_t* p_obj );\r
-\r
-static cl_status_t\r
-__mad_send_init(\r
- IN void* const p_object,\r
- IN void* context,\r
- OUT cl_pool_item_t** const pp_pool_item );\r
-\r
-static cl_status_t\r
-__mad_rmpp_init(\r
- IN void* const p_object,\r
- IN void* context,\r
- OUT cl_pool_item_t** const pp_pool_item );\r
-\r
-\r
-\r
-/*\r
- * Create a MAD pool.\r
- */\r
-ib_api_status_t\r
-ib_create_mad_pool(\r
- IN const ib_al_handle_t h_al,\r
- IN const size_t min,\r
- IN const size_t max,\r
- IN const size_t grow_size,\r
- OUT ib_pool_handle_t* const ph_pool )\r
-{\r
- ib_pool_handle_t h_pool;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
- return IB_INVALID_AL_HANDLE;\r
- }\r
- if( !ph_pool )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate the min and max parameters. */\r
- if( (min > 0) && (max > 0) && (min > max) )\r
- return IB_INVALID_SETTING;\r
-\r
- h_pool = cl_zalloc( sizeof( al_pool_t ) );\r
- if( !h_pool )\r
- return IB_INSUFFICIENT_MEMORY;\r
-\r
- /* Initialize the pool lists. */\r
- cl_qlist_init( &h_pool->key_list );\r
- ExInitializeNPagedLookasideList( &h_pool->mad_stack, NULL, NULL,\r
- 0, sizeof(mad_item_t), 'dmla', 0 );\r
- ExInitializeNPagedLookasideList( &h_pool->mad_send_pool, NULL, NULL,\r
- 0, sizeof(mad_send_t), 'dmla', 0 );\r
- ExInitializeNPagedLookasideList( &h_pool->mad_rmpp_pool, NULL, NULL,\r
- 0, sizeof(mad_rmpp_t), 'dmla', 0 );\r
-\r
- /* Initialize the pool object. */\r
- construct_al_obj( &h_pool->obj, AL_OBJ_TYPE_H_MAD_POOL );\r
- status = init_al_obj( &h_pool->obj, h_pool, TRUE,\r
- __destroying_pool, NULL, __free_pool );\r
- if( status != IB_SUCCESS )\r
- {\r
- __free_pool( &h_pool->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Attach the pool to the AL object. */\r
- status = attach_al_obj( &h_al->obj, &h_pool->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- h_pool->obj.pfn_destroy( &h_pool->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Save the pool parameters. Set grow_size to min for initialization. */\r
- h_pool->max = max;\r
- h_pool->grow_size = min;\r
-\r
- /* Save the grow_size for subsequent allocations. */\r
- h_pool->grow_size = grow_size;\r
-\r
- /* Return the pool handle. */\r
- *ph_pool = h_pool;\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &h_pool->obj );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Pre-destory the pool.\r
- */\r
-static void\r
-__destroying_pool(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_pool_handle_t h_pool;\r
- ib_al_handle_t h_al;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- CL_ASSERT( p_obj );\r
- h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );\r
-\r
- /* Get the AL instance of this MAD pool. */\r
- p_obj = h_pool->obj.p_parent_obj;\r
- h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );\r
-\r
- /* Deregister this MAD pool from all protection domains. */\r
- al_dereg_pool( h_al, h_pool );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free the pool.\r
- */\r
-static void\r
-__free_pool(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_pool_handle_t h_pool;\r
-\r
- CL_ASSERT( p_obj );\r
- h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );\r
-\r
- ExDeleteNPagedLookasideList( &h_pool->mad_send_pool );\r
- ExDeleteNPagedLookasideList( &h_pool->mad_rmpp_pool );\r
- ExDeleteNPagedLookasideList( &h_pool->mad_stack );\r
- destroy_al_obj( &h_pool->obj );\r
- cl_free( h_pool );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Destory a MAD pool.\r
- */\r
-ib_api_status_t\r
-ib_destroy_mad_pool(\r
- IN const ib_pool_handle_t h_pool )\r
-{\r
- cl_list_item_t* p_array_item;\r
- al_obj_t* p_obj;\r
- boolean_t busy;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- /* Verify that all send handles and MAD elements are in pool. */\r
- cl_spinlock_acquire( &h_pool->obj.lock );\r
- busy = ( h_pool->obj.ref_cnt > 1 );\r
- for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );\r
- p_array_item != cl_qlist_end( &h_pool->obj.obj_list ) && !busy;\r
- p_array_item = cl_qlist_next( p_array_item ) )\r
- {\r
- p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );\r
- busy = ( p_obj->ref_cnt > 1 );\r
- }\r
- cl_spinlock_release( &h_pool->obj.lock );\r
-\r
- /* Return an error if the pool is busy. */\r
- if( busy )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("h_pool (0x%016I64x) is busy!.\n", (LONG64)h_pool) );\r
- return IB_RESOURCE_BUSY;\r
- }\r
-\r
- ref_al_obj( &h_pool->obj );\r
- h_pool->obj.pfn_destroy( &h_pool->obj, NULL );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Register a MAD pool with a protection domain.\r
- */\r
-ib_api_status_t\r
-ib_reg_mad_pool(\r
- IN const ib_pool_handle_t h_pool,\r
- IN const ib_pd_handle_t h_pd,\r
- OUT ib_pool_key_t* const pp_pool_key )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
- /* Alias keys require an alias PD. */\r
- if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
- return IB_INVALID_PD_HANDLE;\r
- }\r
-\r
- status = reg_mad_pool( h_pool, h_pd, pp_pool_key );\r
- /* Release the reference taken in init_al_obj. */\r
- if( status == IB_SUCCESS )\r
- deref_al_obj( &(*pp_pool_key)->obj );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Internal MAD-pool-to-PD registration.  Creates a pool key (NORMAL for\r
- * regular PDs, ALIAS for alias PDs); NORMAL keys register all of physical\r
- * memory on the PD and are chained on the pool's key list; ALIAS keys\r
- * borrow the lkey of the CA's global pool key.  On success the returned\r
- * key still carries the init_al_obj reference (caller must release it).\r
- */\r
-ib_api_status_t\r
-reg_mad_pool(\r
-	IN		const	ib_pool_handle_t			h_pool,\r
-	IN		const	ib_pd_handle_t				h_pd,\r
-		OUT			ib_pool_key_t* const		pp_pool_key )\r
-{\r
-	al_pool_key_t*			p_pool_key;\r
-	ib_al_handle_t			h_al;\r
-	ib_api_status_t			status;\r
-	al_key_type_t			key_type;\r
-\r
-	AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
-	if( !pp_pool_key )\r
-	{\r
-		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
-		return IB_INVALID_PARAMETER;\r
-	}\r
-\r
-	/* Set the type of key to create. */\r
-	if( h_pd->type != IB_PDT_ALIAS )\r
-		key_type = AL_KEY_NORMAL;\r
-	else\r
-		key_type = AL_KEY_ALIAS;\r
-\r
-	/* Allocate a pool key structure. */\r
-	p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) );\r
-	if( !p_pool_key )\r
-		return IB_INSUFFICIENT_MEMORY;\r
-\r
-	/* Initialize the pool key. */\r
-	construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY );\r
-	p_pool_key->type = key_type;\r
-	p_pool_key->h_pool = h_pool;\r
-\r
-	/* Initialize the pool key object. */\r
-	status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE,\r
-		__destroying_pool_key, __cleanup_pool_key, __free_pool_key );\r
-	if( status != IB_SUCCESS )\r
-	{\r
-		__free_pool_key( &p_pool_key->obj );\r
-\r
-		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
-			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );\r
-		return status;\r
-	}\r
-\r
-	/* Register the pool on the protection domain. */\r
-	if( key_type == AL_KEY_NORMAL )\r
-	{\r
-		ib_phys_create_t		phys_create;\r
-		ib_phys_range_t			phys_range;\r
-		uint64_t				vaddr;\r
-		net32_t					rkey;\r
-\r
-		/* Register all of physical memory. */\r
-		/* Kernel-only: gives the pool an lkey valid for any MAD buffer. */\r
-		phys_create.length = 0xFFFFFFFFFFFFFFFF;\r
-		phys_create.num_ranges = 1;\r
-		phys_create.range_array = &phys_range;\r
-		phys_create.buf_offset = 0;\r
-		phys_create.hca_page_size = PAGE_SIZE;\r
-		phys_create.access_ctrl = IB_AC_LOCAL_WRITE;\r
-		phys_range.base_addr = 0;\r
-		phys_range.size = 0xFFFFFFFFFFFFFFFF;\r
-		vaddr = 0;\r
-		status = ib_reg_phys( h_pd, &phys_create, &vaddr,\r
-			&p_pool_key->lkey, &rkey, &p_pool_key->h_mr );\r
-		if( status != IB_SUCCESS )\r
-		{\r
-			p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );\r
-			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
-				("ib_reg_phys returned %s\n", ib_get_err_str( status )) );\r
-			return status;\r
-		}\r
-\r
-		/* Chain the pool key onto the pool. */\r
-		cl_spinlock_acquire( &h_pool->obj.lock );\r
-		cl_qlist_insert_tail( &h_pool->key_list, &p_pool_key->pool_item );\r
-		cl_spinlock_release( &h_pool->obj.lock );\r
-	}\r
-\r
-	/*\r
-	 * Attach to the pool after we register the memory so that PD destruction\r
-	 * will cleanup the pool key before its memory region.\r
-	 */\r
-	status = attach_al_obj( &h_pd->obj, &p_pool_key->obj );\r
-	if( status != IB_SUCCESS )\r
-	{\r
-		p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL );\r
-\r
-		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
-			("attach_al_obj returned %s\n", ib_get_err_str(status)) );\r
-		return status;\r
-	}\r
-\r
-	/* From the PD, get the AL handle of the pool_key. */\r
-	h_al = h_pd->obj.h_al;\r
-\r
-	/* Add this pool_key to the AL instance. */\r
-	al_insert_key( h_al, p_pool_key );\r
-\r
-	/* Released in __cleanup_pool_key. */\r
-	ref_al_obj( &h_pool->obj );\r
-\r
-	/*\r
-	 * Take a reference on the global pool_key for this CA, if it exists.\r
-	 * Note that the pool_key does not exist for the global MAD pool in\r
-	 * user-mode, as that MAD pool never registers memory on a PD.\r
-	 */\r
-	/* TODO: Is the pool_key check here needed since this is a kernel-only implementation? */\r
-	if( key_type == AL_KEY_ALIAS && h_pd->obj.p_ci_ca->pool_key )\r
-	{\r
-		ref_al_obj( &h_pd->obj.p_ci_ca->pool_key->obj );\r
-		p_pool_key->pool_key = h_pd->obj.p_ci_ca->pool_key;\r
-	}\r
-\r
-	/* Return the pool key. */\r
-	*pp_pool_key = (ib_pool_key_t)p_pool_key;\r
-\r
-	AL_EXIT( AL_DBG_MAD_POOL );\r
-	return IB_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * The destroying callback releases the memory registration. This is needed\r
- * to maintain the destroy semantics, where the pool key's destruction is\r
- * async, but the MAD registrations are sync. This means that all memory\r
- * registered on a pool key is deregistered before the pool key leaves the\r
- * destroy call.\r
- */\r
-static void\r
-__destroying_pool_key(\r
- IN al_obj_t* p_obj )\r
-{\r
- al_pool_key_t* p_pool_key;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );\r
-\r
- /* Remove this pool_key from the AL instance. */\r
- al_remove_key( p_pool_key );\r
-\r
- p_pool_key->lkey = 0;\r
-}\r
-\r
-\r
-/*\r
- * Release all references on objects that were needed by the pool key.\r
- */\r
-static void\r
-__cleanup_pool_key(\r
- IN al_obj_t* p_obj )\r
-{\r
- cl_list_item_t *p_list_item, *p_next_item;\r
- ib_mad_element_t *p_mad_element_list, *p_last_mad_element;\r
- al_mad_element_t *p_mad;\r
- ib_api_status_t status;\r
- al_pool_key_t* p_pool_key;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );\r
-\r
- CL_ASSERT( !p_pool_key->mad_cnt );\r
-\r
- if( p_pool_key->h_mr )\r
- ib_dereg_mr( p_pool_key->h_mr );\r
-\r
- /* Search for any outstanding MADs associated with the given pool key. */\r
- if( p_pool_key->mad_cnt )\r
- {\r
- p_mad_element_list = p_last_mad_element = NULL;\r
-\r
- cl_spinlock_acquire( &p_pool_key->obj.h_al->mad_lock );\r
- for( p_list_item = cl_qlist_head( &p_pool_key->obj.h_al->mad_list );\r
- p_list_item != cl_qlist_end( &p_pool_key->obj.h_al->mad_list );\r
- p_list_item = p_next_item )\r
- {\r
- p_next_item = cl_qlist_next( p_list_item );\r
- p_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item );\r
-\r
- if( p_mad->pool_key != p_pool_key ) continue;\r
-\r
- /* Build the list of MADs to be returned to pool. */\r
- if( p_last_mad_element )\r
- p_last_mad_element->p_next = &p_mad->element;\r
- else\r
- p_mad_element_list = &p_mad->element;\r
-\r
- p_last_mad_element = &p_mad->element;\r
- p_last_mad_element->p_next = NULL;\r
- }\r
- cl_spinlock_release( &p_pool_key->obj.h_al->mad_lock );\r
-\r
- /* Return any outstanding MADs to the pool. */\r
- if( p_mad_element_list )\r
- {\r
- status = ib_put_mad( p_mad_element_list );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_put_mad failed with status %s, continuing.\n",\r
- ib_get_err_str(status)) );\r
- }\r
- }\r
- }\r
-\r
- /*\r
- * Remove the pool key from the pool to prevent further registrations\r
- * against this pool.\r
- */\r
- if( p_pool_key->type == AL_KEY_NORMAL )\r
- {\r
- cl_spinlock_acquire( &p_pool_key->h_pool->obj.lock );\r
- cl_qlist_remove_item( &p_pool_key->h_pool->key_list,\r
- &p_pool_key->pool_item );\r
- cl_spinlock_release( &p_pool_key->h_pool->obj.lock );\r
- }\r
-\r
- deref_al_obj( &p_pool_key->h_pool->obj );\r
- p_pool_key->h_pool = NULL;\r
- if( p_pool_key->pool_key )\r
- deref_al_obj( &p_pool_key->pool_key->obj );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free a pool key.\r
- */\r
-static void\r
-__free_pool_key(\r
- IN al_obj_t* p_obj )\r
-{\r
- al_pool_key_t* p_pool_key;\r
-\r
- CL_ASSERT( p_obj );\r
- p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj );\r
-\r
- destroy_al_obj( &p_pool_key->obj );\r
- cl_free( p_pool_key );\r
-}\r
-\r
-\r
-/*\r
- * Deregister a MAD pool from a protection domain. Only normal pool_keys\r
- * can be destroyed using this routine.\r
- */\r
-ib_api_status_t\r
-ib_dereg_mad_pool(\r
- IN const ib_pool_key_t pool_key )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- ref_al_obj( &pool_key->obj );\r
- status = dereg_mad_pool( pool_key, AL_KEY_NORMAL );\r
-\r
- if( status != IB_SUCCESS )\r
- deref_al_obj( &pool_key->obj );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Deregister a MAD pool from a protection domain.\r
- */\r
-ib_api_status_t\r
-dereg_mad_pool(\r
- IN const ib_pool_key_t pool_key,\r
- IN const al_key_type_t expected_type )\r
-{\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( pool_key->type != expected_type )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- /* Check mad_cnt to see if MADs are still outstanding. */\r
- //if( pool_key->mad_cnt )\r
- //{\r
- // AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_MAD_POOL, ("IB_RESOURCE_BUSY\n") );\r
- // return IB_RESOURCE_BUSY;\r
- //}\r
-\r
- pool_key->obj.pfn_destroy( &pool_key->obj, NULL );\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Obtain a MAD element from the pool.\r
- */\r
-static ib_api_status_t\r
-__get_mad_element(\r
- IN const ib_pool_key_t pool_key,\r
- OUT al_mad_element_t** pp_mad_element )\r
-{\r
- mad_item_t* p_mad_item;\r
- net32_t lkey;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- CL_ASSERT( pool_key );\r
- CL_ASSERT( pp_mad_element );\r
-\r
- /* Obtain a MAD item from the stack. */\r
- p_mad_item = (mad_item_t*)ExAllocateFromNPagedLookasideList(\r
- &pool_key->h_pool->mad_stack );\r
- if( !p_mad_item )\r
- return IB_INSUFFICIENT_RESOURCES;\r
-\r
- p_mad_item->pool_key = pool_key;\r
-\r
- if( pool_key->type == AL_KEY_NORMAL )\r
- lkey = pool_key->lkey;\r
- else\r
- lkey = pool_key->pool_key->lkey;\r
-\r
- CL_ASSERT( ADDRESS_AND_SIZE_TO_SPAN_PAGES(\r
- p_mad_item->al_mad_element.mad_buf, MAD_BLOCK_GRH_SIZE ) == 1 );\r
-\r
- /* Clear the element. */\r
- cl_memclr( &p_mad_item->al_mad_element, sizeof(al_mad_element_t) );\r
-\r
- /* Initialize the receive data segment information. */\r
- p_mad_item->al_mad_element.grh_ds.vaddr =\r
- cl_get_physaddr( p_mad_item->al_mad_element.mad_buf );\r
- p_mad_item->al_mad_element.grh_ds.length = MAD_BLOCK_GRH_SIZE;\r
- p_mad_item->al_mad_element.grh_ds.lkey = lkey;\r
-\r
- /* Initialize the send data segment information. */\r
- p_mad_item->al_mad_element.mad_ds.vaddr =\r
- p_mad_item->al_mad_element.grh_ds.vaddr + sizeof(ib_grh_t);\r
- p_mad_item->al_mad_element.mad_ds.length = MAD_BLOCK_SIZE;\r
- p_mad_item->al_mad_element.mad_ds.lkey = lkey;\r
-\r
- /* Initialize grh */\r
- p_mad_item->al_mad_element.element.p_grh =\r
- (ib_grh_t*)p_mad_item->al_mad_element.mad_buf;\r
-\r
- /* Hold a reference on the pool key while a MAD element is removed. */\r
- ref_al_obj( &pool_key->obj );\r
- cl_atomic_inc( &pool_key->mad_cnt );\r
-\r
- p_mad_item->al_mad_element.pool_key = (ib_pool_key_t)pool_key;\r
- /* Return the MAD element. */\r
- *pp_mad_element = &p_mad_item->al_mad_element;\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Return a MAD element to the pool.\r
- */\r
-static void\r
-__put_mad_element(\r
- IN al_mad_element_t* p_mad_element )\r
-{\r
- mad_item_t* p_mad_item;\r
- ib_pool_key_t pool_key;\r
-\r
- CL_ASSERT( p_mad_element );\r
- p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );\r
- pool_key = p_mad_item->pool_key;\r
- CL_ASSERT( pool_key );\r
- CL_ASSERT( pool_key->h_pool );\r
-\r
- /* Clear the MAD buffer. */\r
- cl_memclr( p_mad_element->mad_buf, MAD_BLOCK_GRH_SIZE );\r
- p_mad_element->element.p_next = NULL;\r
-\r
- /* Return the MAD element to the pool. */\r
- ExFreeToNPagedLookasideList( &pool_key->h_pool->mad_stack, p_mad_item );\r
-\r
- cl_atomic_dec( &pool_key->mad_cnt );\r
- deref_al_obj( &pool_key->obj );\r
-}\r
-\r
-\r
-\r
-ib_mad_send_handle_t\r
-get_mad_send(\r
- IN const al_mad_element_t *p_mad_element )\r
-{\r
- mad_item_t* p_mad_item;\r
- mad_send_t *p_mad_send;\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Get a handle to the pool. */\r
- p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );\r
- CL_ASSERT( p_mad_item->pool_key );\r
- CL_ASSERT( p_mad_item->pool_key->h_pool );\r
-\r
- p_mad_send = ExAllocateFromNPagedLookasideList(\r
- &p_mad_item->pool_key->h_pool->mad_send_pool );\r
- if( !p_mad_send )\r
- return NULL;\r
-\r
- p_mad_send->mad_send.canceled = FALSE;\r
- p_mad_send->mad_send.p_send_mad = NULL;\r
- p_mad_send->mad_send.p_resp_mad = NULL;\r
- p_mad_send->mad_send.h_av = NULL;\r
- p_mad_send->mad_send.retry_cnt = 0;\r
- p_mad_send->mad_send.retry_time = 0;\r
- p_mad_send->mad_send.delay = 0;\r
- p_mad_send->h_pool = p_mad_item->pool_key->h_pool;\r
-\r
- ref_al_obj( &p_mad_item->pool_key->h_pool->obj );\r
- return &p_mad_send->mad_send;\r
-}\r
-\r
-\r
-\r
-void\r
-put_mad_send(\r
- IN ib_mad_send_handle_t h_mad_send )\r
-{\r
- mad_send_t *p_mad_send;\r
- ib_pool_handle_t h_pool;\r
-\r
- p_mad_send = PARENT_STRUCT( h_mad_send, mad_send_t, mad_send );\r
- h_pool = p_mad_send->h_pool;\r
-\r
- ExFreeToNPagedLookasideList( &h_pool->mad_send_pool, p_mad_send );\r
- deref_al_obj( &h_pool->obj );\r
-}\r
-\r
-\r
-\r
-al_mad_rmpp_t*\r
-get_mad_rmpp(\r
- IN const al_mad_element_t *p_mad_element )\r
-{\r
- mad_item_t *p_mad_item;\r
- mad_rmpp_t *p_mad_rmpp;\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Get a handle to the pool. */\r
- p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element );\r
- CL_ASSERT( p_mad_item->pool_key );\r
- CL_ASSERT( p_mad_item->pool_key->h_pool );\r
-\r
- p_mad_rmpp = ExAllocateFromNPagedLookasideList(\r
- &p_mad_item->pool_key->h_pool->mad_rmpp_pool );\r
- if( !p_mad_rmpp )\r
- return NULL;\r
-\r
- p_mad_rmpp->h_pool = p_mad_item->pool_key->h_pool;\r
-\r
- ref_al_obj( &p_mad_item->pool_key->h_pool->obj );\r
- return &p_mad_rmpp->mad_rmpp;\r
-}\r
-\r
-\r
-\r
-void\r
-put_mad_rmpp(\r
- IN al_mad_rmpp_t* h_mad_rmpp )\r
-{\r
- mad_rmpp_t *p_mad_rmpp;\r
- ib_pool_handle_t h_pool;\r
-\r
- p_mad_rmpp = PARENT_STRUCT( h_mad_rmpp, mad_rmpp_t, mad_rmpp );\r
-\r
- h_pool = p_mad_rmpp->h_pool;\r
-\r
- ExFreeToNPagedLookasideList( &h_pool->mad_rmpp_pool, p_mad_rmpp );\r
- deref_al_obj( &h_pool->obj );\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ib_get_mad(\r
- IN const ib_pool_key_t pool_key,\r
- IN const size_t buf_size,\r
- OUT ib_mad_element_t **pp_mad_element )\r
-{\r
- al_mad_element_t* p_mad;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD_POOL );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
- if( !buf_size || !pp_mad_element )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- status = __get_mad_element( pool_key, &p_mad );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return status;\r
- }\r
-\r
- /* Set the user accessible buffer. */\r
- if( buf_size <= MAD_BLOCK_SIZE )\r
- {\r
- /* Use the send buffer for 256 byte MADs. */\r
- p_mad->element.p_mad_buf = (ib_mad_t*)(p_mad->mad_buf + sizeof(ib_grh_t));\r
- }\r
- else if( buf_size >= 0xFFFFFFFF )\r
- {\r
- __put_mad_element( p_mad );\r
- return IB_INVALID_SETTING;\r
- }\r
- else\r
- {\r
- /* Allocate a new buffer for the MAD. */\r
- p_mad->p_al_mad_buf = cl_zalloc( buf_size );\r
- if( !p_mad->p_al_mad_buf )\r
- {\r
- __put_mad_element( p_mad );\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
- p_mad->element.p_mad_buf = p_mad->p_al_mad_buf;\r
- }\r
- p_mad->element.size = (uint32_t)buf_size;\r
-\r
- /* Track the MAD element with the requesting AL instance. */\r
- al_insert_mad( pool_key->h_al, p_mad );\r
-\r
- /* Return the MAD element to the client. */\r
- *pp_mad_element = &p_mad->element;\r
-\r
- AL_EXIT( AL_DBG_MAD_POOL );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ib_put_mad(\r
- IN const ib_mad_element_t* p_mad_element_list )\r
-{\r
- al_mad_element_t* p_mad;\r
-\r
- if( !p_mad_element_list )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- while( p_mad_element_list )\r
- {\r
- p_mad = PARENT_STRUCT( p_mad_element_list, al_mad_element_t, element );\r
- p_mad_element_list = p_mad_element_list->p_next;\r
-\r
- /* Deallocate any buffers allocated for the user. */\r
- if( p_mad->p_al_mad_buf )\r
- {\r
- cl_free( p_mad->p_al_mad_buf );\r
- p_mad->p_al_mad_buf = NULL;\r
- }\r
-\r
- /* See if the MAD has already been returned to the MAD pool. */\r
- CL_ASSERT( p_mad->h_al );\r
-\r
- /* Remove the MAD element from the owning AL instance. */\r
- al_remove_mad( p_mad );\r
-\r
- /* Return the MAD element to the pool. */\r
- __put_mad_element( p_mad );\r
- }\r
-\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Resize the data buffer associated with a MAD element.\r
- */\r
-ib_api_status_t\r
-al_resize_mad(\r
- OUT ib_mad_element_t *p_mad_element,\r
- IN const size_t buf_size )\r
-{\r
- al_mad_element_t *p_al_element;\r
- ib_mad_t *p_new_buf;\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* We only support growing the buffer for now. */\r
- CL_ASSERT( buf_size > p_mad_element->size );\r
-\r
- /* Cap the size. */\r
- if( buf_size >= 0xFFFFFFFF )\r
- return IB_INVALID_SETTING;\r
-\r
- p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );\r
-\r
- /* Allocate a new buffer. */\r
- p_new_buf = cl_malloc( buf_size );\r
- if( !p_new_buf )\r
- return IB_INSUFFICIENT_MEMORY;\r
-\r
- /* Copy the existing buffer's data into the new buffer. */\r
- cl_memcpy( p_new_buf, p_mad_element->p_mad_buf, p_mad_element->size );\r
- cl_memclr( (uint8_t*)p_new_buf + p_mad_element->size,\r
- buf_size - p_mad_element->size );\r
-\r
- /* Update the MAD element to use the new buffer. */\r
- p_mad_element->p_mad_buf = p_new_buf;\r
- p_mad_element->size = (uint32_t)buf_size;\r
-\r
- /* Free any old buffer. */\r
- if( p_al_element->p_al_mad_buf )\r
- cl_free( p_al_element->p_al_mad_buf );\r
- p_al_element->p_al_mad_buf = p_new_buf;\r
-\r
- return IB_SUCCESS;\r
-}\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_mgr.c 548 2006-11-27 20:03:51Z leonidk $\r
- */\r
-\r
-#include <complib/cl_atomic.h>\r
-#include <complib/cl_async_proc.h>\r
-#include <complib/cl_memory.h>\r
-#include <complib/cl_qlist.h>\r
-#include <complib/cl_spinlock.h>\r
-#include <complib/cl_vector.h>\r
-\r
-#include <iba/ib_ci.h>\r
-\r
-#include "al.h"\r
-#include "al_cm_cep.h"\r
-#include "al_debug.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_mgr.tmh"\r
-#endif\r
-\r
-#include "al_dm.h"\r
-#include "al_mad_pool.h"\r
-#include "al_mcast.h"\r
-#include "al_mgr.h"\r
-#include "al_pnp.h"\r
-#include "al_ioc_pnp.h"\r
-#include "al_query.h"\r
-#include "al_res_mgr.h"\r
-#include "al_smi.h"\r
-#include "ib_common.h"\r
-\r
-#ifndef CL_KERNEL\r
-#include "ual_mgr.h"\r
-#endif\r
-\r
-\r
-#define AL_HDL_VECTOR_MIN 64\r
-#define AL_HDL_VECTOR_GROW 64\r
-\r
-\r
-static void\r
-__free_al_mgr(\r
- IN al_obj_t *p_obj );\r
-\r
-void\r
-free_al(\r
- IN al_obj_t *p_obj );\r
-\r
-\r
-\r
/*
 * Create and initialize the global AL manager (gp_al_mgr) and all the AL
 * management services that hang off of it: PnP, the global MAD pool, the
 * resource manager, the special-QP manager, the SA request manager, the
 * CEP (CM) manager, and IOC PnP.  Called once at driver initialization.
 *
 * Returns IB_SUCCESS, or an error status after tearing down whatever was
 * constructed so far.
 */
ib_api_status_t
create_al_mgr()
{
	cl_status_t			cl_status;
	ib_api_status_t		status;

	AL_ENTER( AL_DBG_MGR );

	/* Only one AL manager instance may exist. */
	CL_ASSERT( !gp_al_mgr );

	gp_al_mgr = cl_zalloc( sizeof( al_mgr_t ) );
	if( !gp_al_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("cl_zalloc failed.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the AL manager components. */
	cl_qlist_init( &gp_al_mgr->ci_ca_list );
	cl_qlist_init( &gp_al_mgr->al_obj_list );
	cl_spinlock_construct( &gp_al_mgr->lock );

	/* Initialize the AL management components. */
	construct_al_obj( &gp_al_mgr->obj, AL_OBJ_TYPE_AL_MGR );
	/*
	 * init_al_obj takes a reference on the object; it is released at the
	 * bottom of this function once initialization has fully succeeded.
	 */
	status = init_al_obj( &gp_al_mgr->obj, gp_al_mgr, FALSE,
		NULL, NULL, __free_al_mgr );
	if( status != IB_SUCCESS )
	{
		/* Object never got initialized - free it directly. */
		__free_al_mgr( &gp_al_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, status = 0x%x.\n", status) );
		return status;
	}

	/*
	 * From here on, failure cleanup goes through the object's destroy
	 * handler so child objects are torn down in order.
	 */
	cl_status = cl_spinlock_init( &gp_al_mgr->lock );
	if( cl_status != CL_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init failed\n") );
		return ib_convert_cl_status( cl_status );
	}

	/* We should be able to open AL now. */
	status = ib_open_al( &gh_al );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_open_al failed, status = 0x%x.\n", status) );
		return status;
	}

	/*
	 * Initialize the AL management services.
	 * Create the PnP manager first - the other services depend on PnP.
	 */
	status = create_pnp( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("al_pnp_create failed with %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Create the global AL MAD pool. */
	status = ib_create_mad_pool( gh_al, 0, 0, 64, &gh_mad_pool );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_create_mad_pool failed with %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the AL resource manager. */
	status = create_res_mgr( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_res_mgr failed with %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the AL special QP manager. */
	status = create_spl_qp_mgr( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_spl_qp_mgr failed with %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the AL SA request manager. */
	status = create_sa_req_mgr( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_sa_req_mgr failed, status = 0x%x.\n", status) );
		return status;
	}

	/* Initialize CM */
	status = create_cep_mgr( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_cm_mgr failed, status = 0x%x.\n", status) );
		return status;
	}

	/* Initialize the AL device management agent. */

/*
	Disable support of DM agent.

	status = create_dm_agent( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_dm_agent failed, status = 0x%x.\n", status) );
		return status;
	}
*/
	status = create_ioc_pnp( &gp_al_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("create_ioc_pnp failed, status = 0x%x.\n", status) );
		return status;
	}

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &gp_al_mgr->obj );

	AL_EXIT( AL_DBG_MGR );
	return IB_SUCCESS;
}
-\r
-\r
-\r
/*
 * Free callback for the global AL manager object.  Destroys the embedded
 * AL object, verifies no AL objects remain, then releases the manager's
 * resources and clears the global pointer.
 */
static void
__free_al_mgr(
	IN				al_obj_t					*p_obj )
{
	CL_ASSERT( p_obj == &gp_al_mgr->obj );

	/*
	 * We need to destroy the AL object before the spinlock, since
	 * destroying the AL object will try to acquire the spinlock.
	 */
	destroy_al_obj( p_obj );

	/* Verify that the object list is empty. */
	print_al_objs( NULL );

	cl_spinlock_destroy( &gp_al_mgr->lock );
	cl_free( gp_al_mgr );
	gp_al_mgr = NULL;
}
-\r
-\r
-\r
-/*\r
- * Register a new CI CA with the access layer.\r
- */\r
-ib_api_status_t\r
-ib_register_ca(\r
- IN const ci_interface_t* p_ci )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MGR );\r
-\r
- if( !p_ci )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
- CL_ASSERT( !find_ci_ca( p_ci->guid ) );\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MGR,\r
- ("CA guid %I64x.\n", p_ci->guid) );\r
-\r
- /* Check the channel interface verbs version. */\r
- if( p_ci->version != VERBS_VERSION )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Unsupported channel interface version, "\r
- "expected = 0x%x, actual = 0x%x.\n",\r
- VERBS_VERSION, p_ci->version) );\r
- return IB_UNSUPPORTED;\r
- }\r
-\r
- /* Construct and initialize the CA structure. */\r
- status = create_ci_ca( &gp_al_mgr->obj, p_ci );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("al_mgr_ca_init failed.\n") );\r
- return status;\r
- }\r
-\r
- AL_EXIT( AL_DBG_MGR );\r
- return status;\r
-}\r
-\r
-\r
-\r
/*
 * Process the removal of a CI CA from the system.
 *
 * Looks the CA up by GUID under the AL manager lock and synchronously
 * destroys its tracking object.  Returns IB_NOT_FOUND if no CA with the
 * given GUID is registered, IB_SUCCESS otherwise.
 */
ib_api_status_t
ib_deregister_ca(
	IN		const	net64_t						ca_guid )
{
	al_ci_ca_t			*p_ci_ca;

	AL_ENTER( AL_DBG_MGR );

	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MGR,
		("Deregistering CA guid %I64x.\n", ca_guid) );

	/* Locate the CA.  The lookup is done under the manager's lock. */
	cl_spinlock_acquire( &gp_al_mgr->obj.lock );
	p_ci_ca = find_ci_ca( ca_guid );
	cl_spinlock_release( &gp_al_mgr->obj.lock );

	if( !p_ci_ca )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("CA not found.\n") );
		return IB_NOT_FOUND;
	}

	/*
	 * TODO: Before destroying, do a query PnP call and return IB_BUSY
	 * as needed.
	 */
	/*
	 * Destroy the CI CA.  A reference is taken first because pfn_destroy
	 * releases one on completion.
	 */
	ref_al_obj( &p_ci_ca->obj );
	p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );

	AL_EXIT( AL_DBG_MGR );
	return IB_SUCCESS;
}
-\r
-\r
/*
 * Initialize a proxy entry used to map user-mode to kernel-mode resources.
 *
 * cl_vector element initializer for the AL instance's handle table.
 * Free entries form an index-linked free list: a free entry's p_obj field
 * holds, cast to a pointer, the index of the next free slot.  'context'
 * is the owning AL instance; its free_hdl counter is pre-incremented so
 * each newly initialized entry points one past itself.
 */
static cl_status_t
__init_hdl(
	IN				void* const					p_element,
	IN				void*						context )
{
	al_handle_t			*p_h;

	p_h = (al_handle_t*)p_element;

	/* Chain free entries one after another. */
	p_h->p_obj = (al_obj_t*)(uintn_t)++(((ib_al_handle_t)context)->free_hdl);
	p_h->type = AL_OBJ_TYPE_UNKNOWN;

	return CL_SUCCESS;
}
-\r
-\r
/*
 * Create a new instance of the access layer.  This function is placed here
 * to prevent sharing the implementation with user-mode.
 *
 * Allocates and initializes an ib_al_t, including its MAD tracking lists,
 * key/query/CEP lists, and the handle vector used to map user-mode handles
 * to kernel objects, then attaches it to the global AL manager.
 *
 * Returns IB_SUCCESS with *ph_al set, or an error status.
 */
ib_api_status_t
ib_open_al(
		OUT			ib_al_handle_t* const		ph_al )
{
	ib_al_handle_t			h_al;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_MGR );

	if( !ph_al )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Allocate an access layer instance. */
	h_al = cl_zalloc( sizeof( ib_al_t ) );
	if( !h_al )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("cl_zalloc failed\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the instance. */
	construct_al_obj( &h_al->obj, AL_OBJ_TYPE_H_AL );
	cl_spinlock_construct( &h_al->mad_lock );
	cl_qlist_init( &h_al->mad_list );
	cl_qlist_init( &h_al->key_list );
	cl_qlist_init( &h_al->query_list );
	cl_qlist_init( &h_al->cep_list );

	cl_vector_construct( &h_al->hdl_vector );

	cl_status = cl_spinlock_init( &h_al->mad_lock );
	if( cl_status != CL_SUCCESS )
	{
		/*
		 * The AL object is not yet initialized, so failures here free
		 * the instance directly rather than via pfn_destroy.
		 */
		free_al( &h_al->obj );
		AL_EXIT( AL_DBG_MGR );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Initialize the handle vector.  __init_hdl chains the entries into
	 * a free list and increments h_al->free_hdl per entry.
	 */
	cl_status = cl_vector_init( &h_al->hdl_vector, AL_HDL_VECTOR_MIN,
		AL_HDL_VECTOR_GROW, sizeof(al_handle_t), __init_hdl, NULL, h_al );
	if( cl_status != CL_SUCCESS )
	{
		free_al( &h_al->obj );
		AL_EXIT( AL_DBG_MGR );
		return ib_convert_cl_status( cl_status );
	}
	/* Handle 0 is reserved; the free list starts at 1. */
	h_al->free_hdl = 1;

	/* Initialize the base object. */
	status = init_al_obj( &h_al->obj, NULL, FALSE,
		destroying_al, NULL, free_al );
	if( status != IB_SUCCESS )
	{
		free_al( &h_al->obj );
		AL_EXIT( AL_DBG_MGR );
		return status;
	}
	status = attach_al_obj( &gp_al_mgr->obj, &h_al->obj );
	if( status != IB_SUCCESS )
	{
		/* Object is initialized now - tear down via pfn_destroy. */
		h_al->obj.pfn_destroy( &h_al->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/*
	 * Self reference the AL instance so that all attached objects
	 * insert themselve in the instance's handle manager automatically.
	 */
	h_al->obj.h_al = h_al;

	*ph_al = h_al;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_al->obj );

	AL_EXIT( AL_DBG_MGR );
	return IB_SUCCESS;
}
-\r
-\r
-uint64_t\r
-al_hdl_insert(\r
- IN const ib_al_handle_t h_al,\r
- IN void* const p_obj,\r
- IN const uint32_t type )\r
-{\r
- cl_status_t status;\r
- size_t size;\r
- uint64_t hdl;\r
- al_handle_t *p_h;\r
-\r
- AL_ENTER( AL_DBG_HDL );\r
-\r
- size = cl_vector_get_size( &h_al->hdl_vector );\r
- hdl = h_al->free_hdl;\r
- if( h_al->free_hdl == size )\r
- {\r
- /* Grow the vector pool. */\r
- status =\r
- cl_vector_set_size( &h_al->hdl_vector, size + AL_HDL_VECTOR_GROW );\r
- if( status != CL_SUCCESS )\r
- {\r
- AL_EXIT( AL_DBG_HDL );\r
- return AL_INVALID_HANDLE;\r
- }\r
- /*\r
- * Return the the start of the free list since the\r
- * entry initializer incremented it.\r
- */\r
- h_al->free_hdl = size;\r
- }\r
-\r
- /* Get the next free entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /* Update the next entry index. */\r
- h_al->free_hdl = (size_t)p_h->p_obj;\r
-\r
- /* Update the entry. */\r
- p_h->type = type;\r
- p_h->p_obj = (al_obj_t*)p_obj;\r
-\r
- return hdl;\r
-}\r
-\r
-\r
-void\r
-al_hdl_free(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl )\r
-{\r
- al_handle_t *p_h;\r
-\r
- CL_ASSERT( hdl < cl_vector_get_size( &h_al->hdl_vector ) );\r
-\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
- p_h->type = AL_OBJ_TYPE_UNKNOWN;\r
- p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl;\r
- h_al->free_hdl = hdl;\r
-}\r
-\r
-\r
-al_obj_t*\r
-al_hdl_ref(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t type )\r
-{\r
- al_handle_t *p_h;\r
- al_obj_t *p_obj;\r
-\r
- CL_ASSERT( type != AL_OBJ_TYPE_H_MAD && type != AL_OBJ_TYPE_H_CONN );\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /* Make sure that the handle is valid and the correct type. */\r
- if( type == AL_OBJ_TYPE_UNKNOWN &&\r
- p_h->type != AL_OBJ_TYPE_H_PD && p_h->type != AL_OBJ_TYPE_H_CQ &&\r
- p_h->type != AL_OBJ_TYPE_H_AV && p_h->type != AL_OBJ_TYPE_H_QP &&\r
- p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW &&\r
- p_h->type != AL_OBJ_TYPE_H_SRQ )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
- else if( p_h->type != type )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_obj = p_h->p_obj;\r
- if( !p_obj->hdl_valid )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
- ref_al_obj( p_obj );\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_obj;\r
-}\r
-\r
-\r
-void*\r
-al_hdl_chk(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t type )\r
-{\r
- al_handle_t *p_h;\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- return NULL;\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /* Make sure that the handle is valid and the correct type. */\r
- if( (p_h->type != type) )\r
- return NULL;\r
-\r
- return p_h->p_obj;\r
-}\r
-\r
-\r
-void*\r
-al_hdl_get(\r
- IN const ib_al_handle_t h_al,\r
- IN const uint64_t hdl,\r
- IN const uint32_t type )\r
-{\r
- al_handle_t *p_h;\r
- void *p_obj;\r
-\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- /* Validate index. */\r
- if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- /* Get the specified entry. */\r
- p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );\r
-\r
- /* Make sure that the handle is valid and the correct type. */\r
- if( (p_h->type != type) )\r
- {\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return NULL;\r
- }\r
-\r
- p_obj = (void*)p_h->p_obj;\r
-\r
- /* Clear the entry. */\r
- p_h->type = AL_OBJ_TYPE_UNKNOWN;\r
- p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl;\r
- h_al->free_hdl = hdl;\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
- return p_obj;\r
-}\r
-\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_mr.c 744 2007-07-31 19:04:15Z leonidk $\r
- */\r
-\r
-#include <iba/ib_al.h>\r
-\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_mr.tmh"\r
-#endif\r
-#include "al_mr.h"\r
-#include "al_pd.h"\r
-#include "al_res_mgr.h"\r
-#include "al_verbs.h"\r
-\r
-#include "ib_common.h"\r
-\r
-\r
-static void\r
-__cleanup_mlnx_fmr(\r
- IN struct _al_obj *p_obj );\r
-\r
-static void\r
-__return_mlnx_fmr(\r
- IN al_obj_t *p_obj );\r
-\r
-\r
-static al_shmid_t*\r
-__create_shmid(\r
- IN const int shmid );\r
-\r
-static void\r
-__free_shmid(\r
- IN struct _al_obj *p_obj );\r
-\r
-\r
/*
 * cl_pool constructor for FMR tracking structures: zero the object,
 * initialize its embedded AL object, and return the embedded pool item.
 *
 * Returns CL_SUCCESS, or CL_ERROR if AL object initialization fails.
 */
cl_status_t
mlnx_fmr_ctor(
	IN				void* const					p_object,
	IN				void*						context,
		OUT			cl_pool_item_t** const		pp_pool_item )
{
	ib_api_status_t			status;
	mlnx_fmr_handle_t		h_fmr;

	UNUSED_PARAM( context );

	h_fmr = (mlnx_fmr_handle_t)p_object;
	cl_memclr( h_fmr, sizeof(mlnx_fmr_t) );

	construct_al_obj( &h_fmr->obj, AL_OBJ_TYPE_H_FMR );
	/*
	 * __cleanup_mlnx_fmr tears down the CI FMR; __return_mlnx_fmr
	 * recycles the tracking structure back to the pool rather than
	 * freeing it (final destruction happens in mlnx_fmr_dtor).
	 */
	status = init_al_obj( &h_fmr->obj, NULL, FALSE, NULL,
		__cleanup_mlnx_fmr, __return_mlnx_fmr );
	if( status != IB_SUCCESS )
	{
		return CL_ERROR;
	}

	*pp_pool_item = &((mlnx_fmr_handle_t)p_object)->obj.pool_item;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_fmr->obj );

	return CL_SUCCESS;
}
-\r
-\r
-\r
/*
 * cl_pool destructor for FMR tracking structures, invoked when the pool
 * itself is destroyed.  Redirects the object's free callback so the AL
 * object is actually destroyed instead of being recycled to the pool.
 */
void
mlnx_fmr_dtor(
	IN		const	cl_pool_item_t* const		p_pool_item,
	IN				void*						context )
{
	al_obj_t				*p_obj;

	UNUSED_PARAM( context );

	p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item );

	/*
	 * The FMR is being totally destroyed.  Modify the free_cb to destroy the
	 * AL object (normally it is __return_mlnx_fmr, which would recycle it).
	 */
	p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj;
	/* pfn_destroy releases the reference taken here. */
	ref_al_obj( p_obj );
	p_obj->pfn_destroy( p_obj, NULL );
}
-\r
-\r
-\r
/*
 * AL object cleanup callback for an FMR: destroy the underlying CI FMR,
 * if one still exists, and clear the handle/link fields so the tracking
 * structure can be recycled.
 */
static void
__cleanup_mlnx_fmr(
	IN				struct _al_obj				*p_obj )
{
	ib_api_status_t			status;
	mlnx_fmr_handle_t		h_fmr;

	CL_ASSERT( p_obj );
	h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj );

	/* Deregister the memory. */
	if( verbs_check_mlnx_fmr( h_fmr ) )
	{
		status = verbs_destroy_mlnx_fmr( h_fmr );
		/* Only checked in debug builds; failure here is not recoverable. */
		CL_ASSERT( status == IB_SUCCESS );

		h_fmr->h_ci_fmr = NULL;
		h_fmr->p_next = NULL;
	}
}
-\r
-\r
-\r
-static void\r
-__return_mlnx_fmr(\r
- IN al_obj_t *p_obj )\r
-{\r
- mlnx_fmr_handle_t h_fmr;\r
-\r
- h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj );\r
- reset_al_obj( p_obj );\r
- put_mlnx_fmr( h_fmr );\r
-}\r
-\r
-\r
-\r
/*
 * Create a Mellanox fast memory region (FMR) on the given protection
 * domain.  Allocates a tracking structure from the FMR pool, attaches it
 * to the PD, and registers the FMR with the verbs provider.
 *
 * Returns IB_SUCCESS with *ph_fmr set, IB_INVALID_PD_HANDLE,
 * IB_INVALID_PARAMETER, IB_INSUFFICIENT_MEMORY, or the verbs status.
 */
ib_api_status_t
mlnx_create_fmr(
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	mlnx_fmr_create_t* const	p_fmr_create,
		OUT			mlnx_fmr_handle_t* const	ph_fmr )
{
	mlnx_fmr_handle_t		h_fmr;
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
		return IB_INVALID_PD_HANDLE;
	}

	if( !p_fmr_create || !ph_fmr )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Get a FMR tracking structure. */
	h_fmr = alloc_mlnx_fmr();
	if( !h_fmr )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to allocate memory handle\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Parent the FMR under the PD so it is destroyed with it. */
	status = attach_al_obj( &h_pd->obj, &h_fmr->obj );
	if( status != IB_SUCCESS )
	{
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Register the memory region. */
	status = verbs_create_mlnx_fmr( h_pd, p_fmr_create, h_fmr );
	if( status != IB_SUCCESS )
	{
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to register memory: %s\n", ib_get_err_str(status)) );
		return status;
	}

	*ph_fmr = h_fmr;
	/* Release the reference taken in alloc_mlnx_fmr for initialization. */
	deref_al_obj( &(*ph_fmr )->obj );

	AL_EXIT( AL_DBG_MR );
	return IB_SUCCESS;
}
-\r
-\r
/*
 * Map a list of physical page addresses into an existing FMR.
 *
 * paddr_list/list_len describe the physical pages; on success *p_vaddr,
 * *p_lkey and *p_rkey receive the mapped virtual address and keys.  The
 * FMR object is referenced only for the duration of the verbs call.
 *
 * Returns IB_INVALID_FMR_HANDLE, IB_INVALID_PARAMETER, or the verbs
 * status.
 */
ib_api_status_t
mlnx_map_phys_fmr(
	IN		const	mlnx_fmr_handle_t			h_fmr,
	IN		const	uint64_t* const				paddr_list,
	IN		const	int							list_len,
	IN	OUT			uint64_t* const				p_vaddr,
		OUT			net32_t* const				p_lkey,
		OUT			net32_t* const				p_rkey)
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	if( !paddr_list || !p_vaddr || !p_lkey || !p_rkey )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/* Hold the FMR across the verbs call. */
	ref_al_obj( &h_fmr->obj );

	/* Register the memory region. */
	status = verbs_map_phys_mlnx_fmr( h_fmr, paddr_list, list_len, p_vaddr, p_lkey, p_rkey);
	if( status != IB_SUCCESS )
	{
		//TODO: do we need to do something more about the error ?
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("unable to map FMR: %s\n", ib_get_err_str(status)) );
	}

	deref_al_obj( &h_fmr->obj );

	AL_EXIT( AL_DBG_MR );
	return status;
}
-\r
-\r
-ib_api_status_t\r
-mlnx_unmap_fmr(\r
- IN const mlnx_fmr_handle_t h_fmr )\r
-{\r
- ib_api_status_t status;\r
- mlnx_fmr_t *p_fmr = (mlnx_fmr_t*)h_fmr;\r
- mlnx_fmr_t *p_cur_fmr;\r
- mlnx_fmr_handle_t *p_fmr_array;\r
- int i;\r
- \r
- AL_ENTER( AL_DBG_MR );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );\r
- return IB_INVALID_FMR_HANDLE;\r
- }\r
-\r
- // calculate the list size\r
- for ( i=0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
- i++;\r
- \r
- // allocate the array\r
- p_fmr_array = cl_zalloc((i+1)*sizeof(mlnx_fmr_handle_t));\r
- if (!p_fmr_array)\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- // fill the array\r
- for ( i=0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
- {\r
- p_fmr_array[i++] = p_cur_fmr->h_ci_fmr;\r
- ref_al_obj( &p_cur_fmr->obj );\r
- }\r
- p_fmr_array[i] = NULL;\r
-\r
- // unmap the array of FMRs\r
- status = verbs_unmap_mlnx_fmr( h_fmr, p_fmr_array );\r
-\r
- // deref the objects\r
- for ( p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
- deref_al_obj( &p_cur_fmr->obj );\r
-\r
- cl_free( p_fmr_array );\r
- \r
- AL_EXIT( AL_DBG_MR );\r
- return status;\r
-}\r
-\r
-\r
/*
 * Destroy an FMR.  The underlying CI FMR is destroyed synchronously; on
 * success the tracking object is destroyed (and recycled to the pool via
 * its __return_mlnx_fmr free callback), on failure only the temporary
 * reference is dropped and the FMR remains valid.
 *
 * Returns IB_INVALID_FMR_HANDLE or the verbs status.
 */
ib_api_status_t
mlnx_destroy_fmr(
	IN		const	mlnx_fmr_handle_t			h_fmr )
{
	ib_api_status_t			status;

	AL_ENTER( AL_DBG_MR );

	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	/* The CI FMR must still exist. */
	if( !verbs_check_mlnx_fmr( h_fmr ) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
		return IB_INVALID_FMR_HANDLE;
	}

	/* Hold the object across the verbs call. */
	ref_al_obj( &h_fmr->obj );

	/* FMR's are destroyed synchronously */
	status = verbs_destroy_mlnx_fmr( h_fmr );

	if( status == IB_SUCCESS )
	{
		h_fmr->h_ci_fmr = NULL;
		/* We're good to destroy the object. 
		NOTE: No need to deref the al object , 
		we are resetting the fmr obj before inserting it back to the pool */
		
		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
	}
	else
	{
		/* Destruction failed - drop only the reference taken above. */
		deref_al_obj( &h_fmr->obj );
	}
	AL_EXIT( AL_DBG_MR );
	return status;
}
-\r
-\r
-\r
-ib_api_status_t\r
-ib_create_shmid(\r
- IN const ib_pd_handle_t h_pd,\r
- IN const int shmid,\r
- IN const ib_mr_create_t* const p_mr_create,\r
- OUT net32_t* const p_lkey,\r
- OUT net32_t* const p_rkey,\r
- OUT ib_mr_handle_t* const ph_mr )\r
-{\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- net32_t lkey;\r
- net32_t rkey;\r
- ib_mr_handle_t h_mr;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
- return IB_INVALID_PD_HANDLE;\r
- }\r
- if( !p_mr_create || !p_lkey || !p_rkey || !ph_mr )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- /* Register the memory region. */\r
- status = ib_reg_mem( h_pd, p_mr_create, &lkey, &rkey, &h_mr );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("unable to register memory: %s\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Create the shmid tracking structure. */\r
- h_mr->p_shmid = __create_shmid( shmid );\r
- if( !h_mr->p_shmid )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("unable to allocate shmid\n") );\r
- ib_dereg_mr( h_mr );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /*\r
- * Record that the memory region is associated with this shmid. The\r
- * insertion should automatically succeed since the list has a minimum\r
- * size of 1.\r
- */\r
- ref_al_obj( &h_mr->p_shmid->obj );\r
- cl_status = cl_list_insert_head( &h_mr->p_shmid->mr_list, h_mr );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
-\r
- /* Add the shmid to the CI CA for tracking. */\r
- add_shmid( h_pd->obj.p_ci_ca, h_mr->p_shmid );\r
-\r
- /* Return the results. */\r
- *p_lkey = lkey;\r
- *p_rkey = rkey;\r
- *ph_mr = h_mr;\r
- AL_EXIT( AL_DBG_MR );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Allocate a new structure to track memory registrations shared across\r
- * processes.\r
- */\r
-static al_shmid_t*\r
-__create_shmid(\r
- IN const int shmid )\r
-{\r
- al_shmid_t *p_shmid;\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
-\r
- /* Allocate the shmid structure. */\r
- p_shmid = cl_zalloc( sizeof( al_shmid_t ) );\r
- if( !p_shmid )\r
- {\r
- return NULL;\r
- }\r
-\r
- /* Construct the shmid structure. */\r
- construct_al_obj( &p_shmid->obj, AL_OBJ_TYPE_H_MR );\r
- cl_list_construct( &p_shmid->mr_list );\r
-\r
- /* Initialize the shmid structure. */\r
- status = init_al_obj( &p_shmid->obj, p_shmid, TRUE,\r
- NULL, NULL, __free_shmid );\r
- if( status != IB_SUCCESS )\r
- {\r
- __free_shmid( &p_shmid->obj );\r
- return NULL;\r
- }\r
-\r
- cl_status = cl_list_init( &p_shmid->mr_list, 1 );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL );\r
- return NULL;\r
- }\r
-\r
- p_shmid->id = shmid;\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_shmid->obj );\r
-\r
- return p_shmid;\r
-}\r
-\r
-\r
-\r
-static void\r
-__free_shmid(\r
- IN struct _al_obj *p_obj )\r
-{\r
- al_shmid_t *p_shmid;\r
-\r
- p_shmid = PARENT_STRUCT( p_obj, al_shmid_t, obj );\r
-\r
- CL_ASSERT( cl_is_list_empty( &p_shmid->mr_list ) );\r
-\r
- cl_list_destroy( &p_shmid->mr_list );\r
- destroy_al_obj( p_obj );\r
- cl_free( p_shmid );\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ib_reg_shmid(\r
- IN const ib_pd_handle_t h_pd,\r
- IN const ib_shmid_t shmid,\r
- IN const ib_mr_create_t* const p_mr_create,\r
- IN OUT uint64_t* const p_vaddr,\r
- OUT net32_t* const p_lkey,\r
- OUT net32_t* const p_rkey,\r
- OUT ib_mr_handle_t* const ph_mr )\r
-{\r
- return reg_shmid( h_pd, shmid, p_mr_create, p_vaddr, p_lkey, p_rkey, ph_mr );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-reg_shmid(\r
- IN const ib_pd_handle_t h_pd,\r
- IN const ib_shmid_t shmid,\r
- IN const ib_mr_create_t* const p_mr_create,\r
- IN OUT uint64_t* const p_vaddr,\r
- OUT net32_t* const p_lkey,\r
- OUT net32_t* const p_rkey,\r
- OUT ib_mr_handle_t* const ph_mr )\r
-{\r
- UNUSED_PARAM( h_pd );\r
- UNUSED_PARAM( shmid );\r
- UNUSED_PARAM( p_mr_create );\r
- UNUSED_PARAM( p_vaddr );\r
- UNUSED_PARAM( p_lkey );\r
- UNUSED_PARAM( p_rkey );\r
- UNUSED_PARAM( ph_mr );\r
- return IB_ERROR;\r
-#if 0\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- al_shmid_t *p_shmid;\r
- uint64_t vaddr;\r
- net32_t lkey;\r
- net32_t rkey;\r
- ib_mr_handle_t h_mr, h_reg_mr;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
- return IB_INVALID_PD_HANDLE;\r
- }\r
- if( !p_vaddr || !p_lkey || !p_rkey || !ph_mr )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- /* Let's see if we can acquire the registered memory region. */\r
- status = acquire_shmid( h_pd->obj.p_ci_ca, shmid, &p_shmid );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("shmid not found: %s\n", ib_get_err_str(status)) );\r
- return IB_NOT_FOUND;\r
- }\r
-\r
- /* Lock down the shmid to prevent deregistrations while we register. */\r
- cl_spinlock_acquire( &p_shmid->obj.lock );\r
-\r
- /*\r
- * There's a chance after we acquired the shmid, all current\r
- * registrations were deregistered.\r
- */\r
- if( cl_is_list_empty( &p_shmid->mr_list ) )\r
- {\r
- /* There are no registrations left to share. */\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("shmid not found\n") );\r
- cl_spinlock_release( &p_shmid->obj.lock );\r
- release_shmid( p_shmid );\r
- return IB_NOT_FOUND;\r
- }\r
-\r
- /* Get a handle to an existing registered memory region. */\r
- h_reg_mr = cl_list_obj( cl_list_head( &p_shmid->mr_list ) );\r
-\r
-// BUGBUG: This release is not safe since the h_reg_mr can be deregistered.\r
- cl_spinlock_release( &p_shmid->obj.lock );\r
-\r
- /* Register the memory region. */\r
- vaddr = *p_vaddr;\r
- status = ib_reg_shared( h_reg_mr, h_pd, access_ctrl, &vaddr,\r
- &lkey, &rkey, &h_mr );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("unable to register shared memory: 0x%0I64x %s\n",\r
- vaddr, ib_get_err_str(status)) );\r
- release_shmid( p_shmid );\r
- return status;\r
- }\r
-\r
- cl_spinlock_acquire( &p_shmid->obj.lock );\r
-\r
- /* Track the registration with the shmid structure. */\r
- cl_status = cl_list_insert_head( &p_shmid->mr_list, h_mr );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("insertion into shmid list failed\n") );\r
- cl_spinlock_release( &p_shmid->obj.lock );\r
- release_shmid( p_shmid );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
- cl_spinlock_release( &p_shmid->obj.lock );\r
-\r
- /* Return the results. */\r
- h_mr->p_shmid = p_shmid;\r
- *p_vaddr = vaddr;\r
- *p_lkey = lkey;\r
- *p_rkey = rkey;\r
- *ph_mr = h_mr;\r
- AL_EXIT( AL_DBG_MR );\r
- return IB_SUCCESS;\r
-#endif\r
-}\r
+++ /dev/null
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al_proxy_verbs.c 548 2006-11-27 20:03:51Z leonidk $
- */
-
-
-#include <complib/comp_lib.h>
-#include <iba/ib_al.h>
-#include <iba/ib_al_ioctl.h>
-#include "al.h"
-#include "al_mgr.h"
-#include "al_debug.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "al_ndi_cm.tmh"
-#endif
-
-#include "al_dev.h"
-/* Get the internal definitions of apis for the proxy */
-#include "al_ca.h"
-#include "ib_common.h"
-#include "al_qp.h"
-#include "al_cm_conn.h"
-#include "al_cm_cep.h"
-#include "al_ndi_cm.h"
-
-uint32_t g_sa_timeout = 500;
-uint32_t g_sa_retries = 4;
-uint8_t g_qp_retries = QP_ATTRIB_RETRY_COUNT;
-uint8_t g_pkt_life_modifier = 0;
-uint8_t g_max_cm_retries = CM_RETRIES;
-
-/*******************************************************************
- *
- * Helpers
- *
- ******************************************************************/
-
-static char * State2String(ndi_cm_state_t state)
-{
- switch (state)
- {
- case NDI_CM_IDLE : return "NDI_CM_IDLE";
- case NDI_CM_CONNECTING_ATS_SENT : return "NDI_CM_CONNECTING_ATS_SENT";
- case NDI_CM_CONNECTING_QPR_SENT : return "NDI_CM_CONNECTING_QPR_SENT";
- case NDI_CM_CONNECTING_REQ_SENT : return "NDI_CM_CONNECTING_REQ_SENT";
- case NDI_CM_CONNECTING_REP_RCVD : return "NDI_CM_CONNECTING_REP_RCVD";
- case NDI_CM_CONNECTING_REJ_RCVD : return "NDI_CM_CONNECTING_REJ_RCVD";
- case NDI_CM_CONNECTED : return "NDI_CM_CONNECTED";
- case NDI_CM_BOUND : return "NDI_CM_BOUND";
- case NDI_CM_LISTENING : return "NDI_CM_LISTENING";
- case NDI_CM_REP_SENT : return "NDI_CM_REP_SENT";
- case NDI_CM_CONNECTED_DREP_SENT : return "NDI_CM_CONNECTED_DREP_SENT";
- case NDI_CM_CONNECTED_DREQ_SENT : return "NDI_CM_CONNECTED_DREQ_SENT";
- default :
- ASSERT(FALSE);
- }
- return "Unknown state";
-}
-
-static inline void
-__ndi_complete_irp(
- IN ib_qp_handle_t h_qp,
- IN PIRP Irp,
- IN NTSTATUS code
- )
-{
- AL_ENTER( AL_DBG_NDI );
-
- CL_ASSERT( Irp );
-
- cl_ioctl_complete( Irp, code, 0 );
- deref_al_obj( &h_qp->obj ); /* release IRP life reference */
- h_qp->p_irp_que->h_ioctl = NULL; /* mark IRP as cancelled */
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-static inline void
-__ndi_complete_irp_ex(
- IN ib_qp_handle_t h_qp,
- IN NTSTATUS code,
- IN ndi_cm_state_t new_state
- )
-{
- PIRP Irp;
-
- AL_ENTER( AL_DBG_NDI );
- h_qp->p_irp_que->state = new_state;
- Irp = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
- if ( Irp )
- __ndi_complete_irp( h_qp, Irp, code );
- AL_EXIT( AL_DBG_NDI );
-}
-
-/*
- * Transition the QP to the error state to flush all oustanding work
- * requests and sets the timewait time. This function may be called
- * when destroying the QP in order to flush all work requests, so we
- * cannot call through the main API, or the call will fail since the
- * QP is no longer in the initialize state.
- */
-static void
-__cep_timewait_qp(
- IN const ib_qp_handle_t h_qp )
-{
- uint64_t timewait = 0;
- ib_qp_mod_t qp_mod;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_CM );
-
- CL_ASSERT( h_qp );
-
- /*
- * The CM should have set the proper timewait time-out value. Reset
- * the QP and let it enter the timewait state.
- */
- if( al_cep_get_timewait( h_qp->obj.h_al,
- ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS )
- {
- /* Special checks on the QP state for error handling - see above. */
- if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) ||
- ( (h_qp->obj.state != CL_INITIALIZED) &&
- (h_qp->obj.state != CL_DESTROYING) ) )
- {
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
- return;
- }
-
- cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );
- qp_mod.req_state = IB_QPS_ERROR;
-
- /* Modify to error state using function pointers - see above. */
- status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL );
- if( status != IB_SUCCESS )
- {
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("pfn_modify_qp to IB_QPS_ERROR returned %s\n",
- ib_get_err_str( status )) );
- return;
- }
-
-#ifdef CL_KERNEL
- /* Store the timestamp after which the QP exits timewait. */
- h_qp->timewait = cl_get_time_stamp() + timewait;
-#endif /* CL_KERNEL */
- }
-
- AL_EXIT( AL_DBG_CM );
-}
-
-static ib_api_status_t
-__ndi_qp2rts(
- IN ib_qp_handle_t const h_qp,
- IN uint8_t init_depth,
- IN uint8_t resp_res,
- IN PIRP p_irp,
- IN OUT ib_qp_mod_t *p_qp_mod
- )
-{
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_NDI );
-
- /* fill required qp attributes */
- status = al_cep_get_rtr_attr( qp_get_al( h_qp ),
- ((al_conn_qp_t*)h_qp)->cid, p_qp_mod );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) );
- goto exit;
- }
- p_qp_mod->state.rtr.resp_res = resp_res;
-
- /* perform the request: INIT->RTR */
- status = ndi_modify_qp( h_qp, p_qp_mod,
- cl_ioctl_out_size( p_irp ), cl_ioctl_out_buf( p_irp ) );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ndi_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) );
- goto exit;
- }
-
- /* fill required qp attributes */
- status = al_cep_get_rts_attr( qp_get_al( h_qp ),
- ((al_conn_qp_t*)h_qp)->cid, p_qp_mod );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) );
- goto exit;
- }
- p_qp_mod->state.rts.init_depth = init_depth;
-
- /* perform the request: RTR->RTS */
- status = ndi_modify_qp( h_qp, p_qp_mod,
- cl_ioctl_out_size( p_irp ), cl_ioctl_out_buf( p_irp ) );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ndi_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) );
- }
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return status;
-}
-
-
-/*******************************************************************
- *
- * CSQ
- *
- ******************************************************************/
-
-static VOID __ndi_insert_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- ndi_qp_csq_t *p_ndi_csq = (ndi_qp_csq_t*)Csq;
-
- AL_ENTER( AL_DBG_NDI );
- InsertTailList( &p_ndi_csq->que, &Irp->Tail.Overlay.ListEntry );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_remove_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- UNUSED_PARAM( Csq );
-
- AL_ENTER( AL_DBG_NDI );
- RemoveEntryList( &Irp->Tail.Overlay.ListEntry );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static PIRP __ndi_peek_next_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp,
- IN PVOID PeekContext
- )
-{
- PIRP nextIrp = NULL;
- PLIST_ENTRY nextEntry;
- PLIST_ENTRY listHead;
- ndi_qp_csq_t *p_ndi_csq = (ndi_qp_csq_t*)Csq;
-
- AL_ENTER( AL_DBG_NDI );
-
- listHead = &p_ndi_csq->que;
-
- //
- // If the IRP is NULL, we will start peeking from the listhead, else
- // we will start from that IRP onwards. This is done under the
- // assumption that new IRPs are always inserted at the tail.
- //
-
- if(Irp == NULL)
- nextEntry = listHead->Flink;
- else
- nextEntry = Irp->Tail.Overlay.ListEntry.Flink;
-
- while(nextEntry != listHead) {
- nextIrp = CONTAINING_RECORD(nextEntry, IRP, Tail.Overlay.ListEntry);
-
- //
- // If context is present, continue until you find a matching one.
- // Else you break out as you got next one.
- //
-
- if(PeekContext)
- {
- /* for now PeekContext is not used */
- }
- else
- {
- break;
- }
-
- nextIrp = NULL;
- nextEntry = nextEntry->Flink;
- }
-
- AL_EXIT( AL_DBG_NDI );
- return nextIrp;
-}
-
-static VOID __ndi_acquire_lock(
- IN PIO_CSQ Csq,
- OUT PKIRQL Irql
- )
-{
- ndi_qp_csq_t *p_ndi_csq = (ndi_qp_csq_t*)Csq;
- ib_qp_handle_t h_qp = p_ndi_csq->h_qp;
- UNUSED_PARAM( Irql );
-
- AL_ENTER( AL_DBG_NDI );
- cl_spinlock_acquire( &h_qp->obj.lock );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_release_lock(
- IN PIO_CSQ Csq,
- IN KIRQL Irql
- )
-{
- ndi_qp_csq_t *p_ndi_csq = (ndi_qp_csq_t*)Csq;
- ib_qp_handle_t h_qp = p_ndi_csq->h_qp;
- UNUSED_PARAM( Irql );
-
- AL_ENTER( AL_DBG_NDI );
- cl_spinlock_release( &h_qp->obj.lock );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_complete_cancelled_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- ndi_qp_csq_t *p_ndi_csq = (ndi_qp_csq_t*)Csq;
- ib_qp_handle_t h_qp = p_ndi_csq->h_qp;
- net32_t cid;
-
- AL_ENTER( AL_DBG_NDI );
-
- switch (p_ndi_csq->state)
- {
- case NDI_CM_CONNECTING_REQ_SENT:
- /* Cleanup from issuing CM REQ. */
- ref_al_obj( &h_qp->obj );
- cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
- if( cid == AL_INVALID_CID || al_destroy_cep( qp_get_al( h_qp ), cid, deref_al_obj ) != IB_SUCCESS )
- {
- deref_al_obj( &h_qp->obj );
- }
- break;
-
- case NDI_CM_CONNECTING_ATS_SENT:
- case NDI_CM_CONNECTING_QPR_SENT:
- al_cancel_sa_req( &h_qp->p_irp_que->h_query->sa_req );
- break;
-
- default:
- /* fall through */
- break;
- }
-
- //TODO: is it always true ?
- p_ndi_csq->state = NDI_CM_IDLE;
- __ndi_complete_irp( h_qp, Irp, CL_CANCELED );
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-/* flush a queue of pending requests */
-
-#pragma warning(disable:4706)
-static inline void __ndi_flush_que(
- IN ndi_qp_csq_t* p_ndi_csq,
- IN NTSTATUS completion_code
- )
-{
- PIRP Irp;
- while( Irp = IoCsqRemoveNextIrp( &p_ndi_csq->csq, NULL ) )
- {
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("h_qp %#I64x, uhdl %#I64x, ref_cnt %d\n",
- (uint64_t)p_ndi_csq->h_qp, p_ndi_csq->h_qp->obj.hdl,
- p_ndi_csq->h_qp->obj.ref_cnt ) );
- cl_ioctl_complete( Irp, completion_code, 0 );
- deref_al_obj( &p_ndi_csq->h_qp->obj ); /* release IRP life reference */
- }
-}
-#pragma warning(default:4706)
-
-void
-ndi_qp_flush_ques(
- IN ib_qp_handle_t h_qp
- )
-{
- AL_ENTER( AL_DBG_NDI );
- __ndi_flush_que( h_qp->p_irp_que, STATUS_CANCELLED );
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("h_qp %#I64x, uhdl %#I64x, ref_cnt %d\n",
- (uint64_t)h_qp, h_qp->obj.hdl, h_qp->obj.ref_cnt ) );
- AL_EXIT( AL_DBG_NDI );
-}
-
-
-
-NTSTATUS
-ndi_qp_init(
- IN ib_qp_handle_t h_qp )
-{
-
- NTSTATUS status;
-
- AL_ENTER( AL_DBG_NDI );
-
- if ( h_qp->type == IB_QPT_UNRELIABLE_CONN )
- {
- status = STATUS_SUCCESS;
- goto exit;
- }
-
- h_qp->p_irp_que = (ndi_qp_csq_t*)cl_zalloc(sizeof(ndi_qp_csq_t));
- if (!h_qp->p_irp_que)
- {
- status = STATUS_NO_MEMORY;
- goto exit;
- }
-
- status = IoCsqInitialize( &h_qp->p_irp_que->csq,
- __ndi_insert_irp, __ndi_remove_irp,
- __ndi_peek_next_irp, __ndi_acquire_lock,
- __ndi_release_lock, __ndi_complete_cancelled_irp );
- if ( !NT_SUCCESS( status ) )
- goto exit;
-
- InitializeListHead( &h_qp->p_irp_que->que );
- h_qp->p_irp_que->h_qp = h_qp;
- h_qp->p_irp_que->h_query = NULL;
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- status = STATUS_SUCCESS;
-
-AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("Creating h_qp %#I64x, uhdl %#I64x \n",
- (uint64_t)h_qp, h_qp->obj.hdl ) );
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return status;
-}
-
-void
-ndi_qp_destroy(
- IN ib_qp_handle_t h_qp )
-{
- AL_ENTER( AL_DBG_NDI );
-
- if (h_qp->type == IB_QPT_RELIABLE_CONN && h_qp->p_irp_que)
- {
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("Destroying h_qp %#I64x, uhdl %#I64x, h_ioctl %p, cid %d\n",
- (uint64_t)h_qp, h_qp->obj.hdl, h_qp->p_irp_que->h_ioctl, ((al_conn_qp_t*)h_qp)->cid ) );
-
- /* cancel pending IRPS for NDI type CQ */
- ndi_qp_flush_ques( h_qp );
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-void
-ndi_qp_free(
- IN ib_qp_handle_t h_qp )
-{
- AL_ENTER( AL_DBG_NDI );
-
- if (h_qp->type == IB_QPT_RELIABLE_CONN && h_qp->p_irp_que)
- {
- /* free NDI context */
- cl_free( h_qp->p_irp_que );
- h_qp->p_irp_que = NULL;
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-
-/*******************************************************************
- *
- * REQ CM request
- *
- ******************************************************************/
-
-static void
-__ndi_req_cm_wi(
- IN DEVICE_OBJECT* p_dev_obj,
- IN void* context )
-{
- NTSTATUS status;
- ib_qp_handle_t h_qp = (ib_qp_handle_t)context;
- UNUSED_PARAM(p_dev_obj);
-
- AL_ENTER( AL_DBG_NDI );
-
- IoFreeWorkItem( h_qp->p_irp_que->p_workitem );
-
- __cep_timewait_qp( h_qp );
-
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- status = (h_qp->p_irp_que->state == NDI_CM_CONNECTED) ? STATUS_CONNECTION_DISCONNECTED : STATUS_CONNECTION_REFUSED;
- __ndi_complete_irp( h_qp, h_qp->p_irp_que->h_ioctl, status );
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-/*
- * A user-specified callback that is invoked after receiving a connection
- * rejection message (REJ).
- */
-
-
-static void
-__ndi_proc_rej(
- IN const ib_cm_handle_t* const p_cm,
- IN const mad_cm_rej_t* const p_rej )
-{
- net32_t cid;
- NTSTATUS status;
- ib_qp_handle_t h_qp = p_cm->h_qp;
- ndi_qp_csq_t *p_ndi_csq = h_qp->p_irp_que;
-
- AL_ENTER( AL_DBG_NDI );
-
- AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("p_rej %p, h_qp %#I64x, uhdl %#I64x, connect reject, reason=%hd\n",
- p_rej, (uint64_t)h_qp, h_qp->obj.hdl, cl_ntoh16(p_rej->reason) ) );
-
- ref_al_obj( &h_qp->obj );
- cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
- CL_ASSERT( cid == p_cm->cid || cid == AL_RESERVED_CID || cid == AL_INVALID_CID );
- if( cid == AL_INVALID_CID ||
- al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS )
- {
- deref_al_obj( &h_qp->obj );
- }
-
- switch (p_ndi_csq->state)
- {
- case NDI_CM_CONNECTING_REQ_SENT:
- al_cep_set_pdata( p_cm->h_al, p_cm->cid, IB_REJ_PDATA_SIZE, (uint8_t*)p_rej->pdata );
- AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_NDI ,
- ("set %d of REQ pdata to CEP with cid %d, h_al %p\n",
- IB_REJ_PDATA_SIZE, p_cm->cid, p_cm->h_al ));
- status = (p_rej->reason == IB_REJ_TIMEOUT) ? STATUS_TIMEOUT : STATUS_CONNECTION_REFUSED;
- __ndi_complete_irp_ex( h_qp, status, NDI_CM_CONNECTING_REJ_RCVD );
- break;
-
- case NDI_CM_CONNECTED:
- case NDI_CM_REP_SENT:
- /* a race: the passive side\92s REP times out, but active side has sent the RTU.
- We are treating this case it like a DREQ */
- {
- IO_STACK_LOCATION *p_io_stack;
- cl_ioctl_handle_t h_ioctl;
-
- h_ioctl = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
- if (!h_ioctl)
- { /* IRP has been cancelled */
- // TODO: no QP flash
- AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_NDI ,
- ( "IRP cancelled: Can't flush the QP %p, ndi_state %d\n",
- h_qp, p_ndi_csq->state ) );
- h_qp->p_irp_que->state = NDI_CM_IDLE;
-// ASSERT(FALSE);
- }
- else
- {
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
- h_qp->p_irp_que->p_workitem = IoAllocateWorkItem( p_io_stack->DeviceObject );
- if ( h_qp->p_irp_que->p_workitem )
- { /* asyncronous performing */
- IoQueueWorkItem( h_qp->p_irp_que->p_workitem,
- __ndi_req_cm_wi, DelayedWorkQueue, h_qp );
- }
- else
- { /* syncronous performing */
- // TODO: no QP flash
- AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
- ( "Can't flush the QP %p, ndi_state %d\n",
- h_qp, p_ndi_csq->state ) );
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- status = (h_qp->p_irp_que->state == NDI_CM_CONNECTED) ? STATUS_CONNECTION_DISCONNECTED : STATUS_CONNECTION_REFUSED;
- __ndi_complete_irp( h_qp, h_ioctl, status );
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
- h_qp->p_irp_que->h_ioctl = NULL;
- }
- }
- break;
- }
-
- default:
- // This is not the state that we waited for, not much that we can
- // do. (This might happen in shutdown)
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("Not the expected state %s\n", State2String( p_ndi_csq->state )));
- ASSERT(FALSE);
- break;
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void
-__ndi_proc_rep(
- IN ib_cm_handle_t* const p_cm,
- IN mad_cm_rep_t* const p_rep )
-{
- ndi_qp_csq_t *p_ndi_csq = p_cm->h_qp->p_irp_que;
-
- AL_ENTER( AL_DBG_NDI );
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI ,("h_qp = 0x%p\n", p_cm->h_qp));
-
- if ( p_ndi_csq->state != NDI_CM_CONNECTING_REQ_SENT)
- {
- // This is not the state that we waited for, not much that we can do
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("Not the expected state %s\n", State2String( p_ndi_csq->state )));
- ASSERT(FALSE);
- goto exit;
- }
-
- /* fill the rej data */
- al_cep_set_pdata( p_cm->h_al, p_cm->cid, IB_REJ_PDATA_SIZE, p_rep->pdata );
- AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
- ("set %d of REQ pdata to CEP with cid %d, h_al %p\n",
- IB_REJ_PDATA_SIZE, p_cm->cid, p_cm->h_al ));
-
- __ndi_complete_irp_ex( p_cm->h_qp, STATUS_SUCCESS, NDI_CM_CONNECTING_REP_RCVD );
-
-exit:
- AL_EXIT( AL_DBG_NDI );
-}
-
-typedef struct _ndi_async_dreq
-{
- cl_async_proc_item_t item;
- ib_cm_handle_t cm;
-
-} ndi_async_dreq_t;
-
-static void
-__ndi_proc_dreq_async(
- IN cl_async_proc_item_t *p_item )
-{
- ib_qp_mod_t qp_mod;
- ib_api_status_t status;
- ib_cm_drep_t cm_drep = { NULL, 0 };
- ndi_async_dreq_t *p_async_dreq = PARENT_STRUCT( p_item, ndi_async_dreq_t, item );
-
- AL_ENTER( AL_DBG_NDI );
-
- /* bring QP to error state */
- cl_memclr( &qp_mod, sizeof(qp_mod) );
- qp_mod.req_state = IB_QPS_ERROR;
-
- status = modify_qp( p_async_dreq->cm.h_qp, &qp_mod, NULL );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_modify_qp to ERROR returned %s.\n", ib_get_err_str(status) ) );
- goto exit;
- }
-
- status = al_cep_drep( p_async_dreq->cm.h_al,
- ((al_conn_qp_t*)p_async_dreq->cm.h_qp)->cid, &cm_drep);
-
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_drep returned %s.\n", ib_get_err_str(status) ) );
- }
- //TODO: what state is to be set here ?
- //p_async_dreq->cm.h_qp->p_irp_que->state = NDI_CM_IDLE;
-
-exit:
- cl_free( p_async_dreq );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void
-__ndi_proc_dreq(
- IN ib_cm_handle_t* const p_cm,
- IN mad_cm_dreq_t* const p_dreq )
-{
- ndi_async_dreq_t *p_async_dreq;
- UNUSED_PARAM(p_dreq);
-
- AL_ENTER( AL_DBG_NDI );
-
- AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_NDI ,("h_qp = 0x%p\n", p_cm->h_qp));
-
- p_async_dreq = (ndi_async_dreq_t*)cl_zalloc( sizeof(ndi_async_dreq_t) );
- if( !p_async_dreq )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("failed to cl_zalloc ndi_async_dreq_t (%d bytes)\n",
- sizeof(ndi_async_dreq_t)) );
- ASSERT(FALSE);
- goto exit;
- }
-
- p_async_dreq->cm = *p_cm;
- p_async_dreq->item.pfn_callback = __ndi_proc_dreq_async;
-
- /* Queue the MAD for asynchronous processing. */
- cl_async_proc_queue( gp_async_proc_mgr, &p_async_dreq->item );
-
-exit:
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void
-__ndi_cm_handler(
- IN const ib_al_handle_t h_al,
- IN const net32_t cid )
-{
- void* __ptr64 context;
- net32_t new_cid;
- ib_mad_element_t *p_mad_el;
- ib_cm_handle_t h_cm;
-
- AL_ENTER( AL_DBG_NDI );
-
- while( al_cep_poll( h_al, cid, &context, &new_cid, &p_mad_el ) == IB_SUCCESS )
- {
- ib_mad_t*p_mad = ib_get_mad_buf( p_mad_el );
- ib_qp_handle_t h_qp = (ib_qp_handle_t)context;
-
- if( p_mad_el->status != IB_SUCCESS )
- {
- switch( p_mad->attr_id )
- {
- case CM_REQ_ATTR_ID:
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("REQ timeouted for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- cid, h_al, h_qp, new_cid ) );
- __ndi_complete_irp_ex( h_qp, STATUS_TIMEOUT, NDI_CM_IDLE );
- break;
-
- case CM_REP_ATTR_ID:
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("REP timeouted for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- cid, h_al, h_qp, new_cid ) );
- __ndi_complete_irp_ex( h_qp, STATUS_CONNECTION_ABORTED, NDI_CM_IDLE );
- break;
-
- case CM_DREQ_ATTR_ID:
- default:
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("Unhandled MAD attr ID %d for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- p_mad->attr_id, cid, h_al, h_qp, new_cid ) );
- break;
- }
- }
- else
- {
- h_cm.h_al = h_al;
- h_cm.h_qp = h_qp;
- h_cm.cid = cid;
-
- switch( p_mad->attr_id )
- {
- case CM_REP_ATTR_ID:
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("REP received for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- cid, h_al, h_qp, new_cid ) );
- CL_ASSERT( ((al_conn_qp_t*)h_qp)->cid == (int32_t)cid ||
- ((al_conn_qp_t*)h_qp)->cid == AL_INVALID_CID ||
- ((al_conn_qp_t*)h_qp)->cid == AL_RESERVED_CID );
- __ndi_proc_rep( &h_cm, (mad_cm_rep_t*)p_mad );
- break;
-
- case CM_REJ_ATTR_ID:
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("REJ received for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- cid, h_al, h_qp, new_cid ) );
- __ndi_proc_rej( &h_cm, (mad_cm_rej_t*)p_mad );
- break;
-
- case CM_DREQ_ATTR_ID:
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("DREQ received for CEP with cid %d, h_al %p, context %p, new_cid %d .\n",
- cid, h_al, h_qp, new_cid ) );
- CL_ASSERT( ((al_conn_qp_t*)h_qp)->cid == (int32_t)cid ||
- ((al_conn_qp_t*)h_qp)->cid == AL_INVALID_CID ||
- ((al_conn_qp_t*)h_qp)->cid == AL_RESERVED_CID );
- __ndi_proc_dreq( &h_cm, (mad_cm_dreq_t*)p_mad );
- break;
-
- default:
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) );
- }
- }
-
- ib_put_mad( p_mad_el );
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void
-__ndi_fill_cm_req(
- IN ib_qp_handle_t const h_qp,
- IN ual_ndi_req_cm_ioctl_in_t *p_req,
- IN ib_path_rec_t *p_path_rec,
- OUT ib_cm_req_t *p_cm_req)
-{
- AL_ENTER( AL_DBG_NDI );
-
- memset( p_cm_req, 0, sizeof(ib_cm_req_t) );
-
- p_cm_req->svc_id = IB_REQ_CM_RDMA_SID_PREFIX | (p_req->prot << 16) | p_req->dst_port;
- p_cm_req->max_cm_retries = g_max_cm_retries;
- p_cm_req->p_primary_path = p_path_rec;
-
- p_cm_req->p_req_pdata = (uint8_t *)&p_req->pdata;
- p_cm_req->req_length = sizeof(p_req->pdata);
-
- p_cm_req->qp_type = IB_QPT_RELIABLE_CONN;
- p_cm_req->h_qp = h_qp;
- p_cm_req->resp_res = 0;
- p_cm_req->init_depth = 0;
-
- p_cm_req->remote_resp_timeout =
- ib_path_rec_pkt_life( p_path_rec ) + CM_REMOTE_TIMEOUT;
- if( p_cm_req->remote_resp_timeout > 0x1F )
- p_cm_req->remote_resp_timeout = 0x1F;
- else if( p_cm_req->remote_resp_timeout < CM_MIN_REMOTE_TIMEOUT )
- p_cm_req->remote_resp_timeout = CM_MIN_REMOTE_TIMEOUT;
-
- p_cm_req->flow_ctrl = TRUE; /* HCAs must support end-to-end flow control. */
-
- p_cm_req->local_resp_timeout =
- ib_path_rec_pkt_life( p_path_rec ) + CM_LOCAL_TIMEOUT;
- if( p_cm_req->local_resp_timeout > 0x1F )
- p_cm_req->local_resp_timeout = 0x1F;
- else if( p_cm_req->local_resp_timeout < CM_MIN_LOCAL_TIMEOUT )
- p_cm_req->local_resp_timeout = CM_MIN_LOCAL_TIMEOUT;
-
- p_cm_req->rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT;
- p_cm_req->rnr_retry_cnt = QP_ATTRIB_RNR_RETRY;
- p_cm_req->retry_cnt = g_qp_retries;
- p_cm_req->p_alt_path = NULL;
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void AL_API
-__ndi_pr_query_cb(
- ib_query_rec_t *p_query_rec )
-{
- ib_api_status_t status;
- cl_ioctl_handle_t h_ioctl;
- ib_cm_req_t cm_req;
- uint8_t pkt_life;
- ib_qp_mod_t qp_mod;
- ib_path_rec_t *p_path_rec;
- ual_ndi_req_cm_ioctl_in_t *p_req = (ual_ndi_req_cm_ioctl_in_t* __ptr64)p_query_rec->query_context;
- ib_qp_handle_t h_qp = (ib_qp_handle_t)p_req->h_qp;
- net32_t cid, old_cid;
-
- AL_ENTER( AL_DBG_NDI );
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("status is %d, count is %d, context %p\n", p_query_rec->status,
- p_query_rec->result_cnt, p_query_rec->query_context) );
-
- h_ioctl = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
- if( !h_ioctl || p_query_rec->status != IB_SUCCESS || !p_query_rec->result_cnt )
- goto err_irp_complete;
-
- /* Path Record has been received ! */
- p_path_rec = ib_get_query_path_rec( p_query_rec->p_result_mad, 0 );
-
- /* fix packet life */
- CL_ASSERT( p_path_rec );
- pkt_life = ib_path_rec_pkt_life( p_path_rec ) + g_pkt_life_modifier;
- if( pkt_life > 0x1F )
- pkt_life = 0x1F;
-
- p_path_rec->pkt_life &= IB_PATH_REC_SELECTOR_MASK;
- p_path_rec->pkt_life |= pkt_life;
-
- /* Get a CEP and bind it to the QP. */
- status = al_create_cep( qp_get_al( h_qp ), __ndi_cm_handler, h_qp, &cid );
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_create_cep returned %s.\n", ib_get_err_str( status )) );
- goto err_irp_complete;
- }
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("Created Active CEP with cid %d, h_al %p, context %p\n",
- cid, qp_get_al( h_qp ), h_qp ) );
-
- /* See if this QP has already been connected. */
- old_cid = cl_atomic_comp_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID, cid );
- if( old_cid != AL_INVALID_CID || h_qp->obj.state == CL_DESTROYING )
- goto err_cep_destroy;
-
- /* Format ib_cm_req_t structure */
- __ndi_fill_cm_req( h_qp, p_req, p_path_rec, &cm_req );
-
- /* prepare CEP for connection */
- status = al_cep_pre_req( qp_get_al( h_qp ),
- ((al_conn_qp_t*)h_qp)->cid, &cm_req, &qp_mod );
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) );
- goto err_cep_destroy;
- }
-
- /* insert IRP in the queue */
- h_qp->p_irp_que->state = NDI_CM_CONNECTING_REQ_SENT;
- IoCsqInsertIrp( &h_qp->p_irp_que->csq, h_ioctl, NULL );
-
- /* send CM REQ */
- status = al_cep_send_req( qp_get_al( h_qp ), cid );
- if( status != IB_SUCCESS )
- goto err_irp_remove;
-
- /* we can release it now. In case of QP destroy __destroying_qp will remove CEP */
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
-
- /* SUCCESS ! */
- goto exit;
-
-err_irp_remove:
- h_ioctl = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
-
-err_cep_destroy:
- al_destroy_cep( qp_get_al( h_qp ), cid, NULL );
-
-err_irp_complete:
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- /* the IRP "has" 2 QP references, taken in __ndi_ats_query */
- if ( h_ioctl )
- __ndi_complete_irp( h_qp, h_ioctl, STATUS_HOST_UNREACHABLE );
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
-
-exit:
- cl_free( p_req );
- if( p_query_rec->p_result_mad )
- ib_put_mad( p_query_rec->p_result_mad );
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-
-
-/* Synchronously query the SA for a GUID. Return 0 on success. */
-void
-__ndi_pr_query(
- IN cl_ioctl_handle_t h_ioctl,
- IN ual_ndi_req_cm_ioctl_in_t *p_req,
- IN ib_gid_t *p_dest_gid )
-{
- ib_gid_pair_t user_query;
- ib_query_req_t query_req;
- ib_api_status_t status;
- ib_qp_handle_t h_qp = (ib_qp_handle_t)p_req->h_qp;
-
- AL_ENTER( AL_DBG_NDI );
-
- query_req.query_type = IB_QUERY_PATH_REC_BY_GIDS;
- query_req.p_query_input = &user_query;
- query_req.port_guid = p_req->guid;
- query_req.timeout_ms = g_sa_timeout;
- query_req.retry_cnt = g_sa_retries;
- query_req.flags = 0; /* IB_FLAGS_SYNC */
- query_req.query_context = p_req;
- query_req.pfn_query_cb = __ndi_pr_query_cb;
-
- user_query.src_gid.unicast.prefix = p_dest_gid->unicast.prefix;
- user_query.src_gid.unicast.interface_id = p_req->guid;
- user_query.dest_gid = *p_dest_gid;
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("Query for path from %I64x to %I64x\n",
- p_req->guid, ib_gid_get_guid( p_dest_gid )) );
-
- /* insert IRP in the queue */
- h_qp->p_irp_que->state = NDI_CM_CONNECTING_QPR_SENT;
- IoCsqInsertIrp( &h_qp->p_irp_que->csq, h_ioctl, NULL );
-
- status = ib_query( qp_get_al( h_qp ), &query_req, &h_qp->p_irp_que->h_query );
-
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("ib_query failed (%d)\n", status) );
- cl_free( p_req );
- __ndi_complete_irp_ex( h_qp, CL_ERROR, NDI_CM_IDLE );
- /* relase additional reference, taken in __ndi_ats_query */
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-
-static void AL_API
-__ndi_ats_query_cb(
- IN ib_query_rec_t *p_query_rec )
-{
- cl_ioctl_handle_t h_ioctl;
- ib_service_record_t *service_record;
- ual_ndi_req_cm_ioctl_in_t *p_req = (ual_ndi_req_cm_ioctl_in_t* __ptr64)p_query_rec->query_context;
- ib_qp_handle_t h_qp = (ib_qp_handle_t)p_req->h_qp;
-
- AL_ENTER( AL_DBG_NDI );
-
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("status is %d, count is %d, context %p\n", p_query_rec->status,
- p_query_rec->result_cnt, p_query_rec->query_context) );
-
- h_ioctl = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
- if( !h_ioctl || p_query_rec->status != IB_SUCCESS || !p_query_rec->result_cnt )
- {
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- if ( h_ioctl )
- __ndi_complete_irp( h_qp, h_ioctl, STATUS_HOST_UNREACHABLE );
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
- cl_free( p_req );
- goto exit; /* ATS request failed */
- }
-
- /* send Path Request */
- service_record = ib_get_query_svc_rec( p_query_rec->p_result_mad, 0 );
- __ndi_pr_query( h_ioctl, p_req, &service_record->service_gid );
-
-exit:
- if( p_query_rec->p_result_mad )
- ib_put_mad( p_query_rec->p_result_mad );
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-/* Send asynchronous query to SA for a GUID. Return STATUS_PENDING on success. */
-NTSTATUS
-__ndi_ats_query(
- IN ib_qp_handle_t const h_qp,
- IN cl_ioctl_handle_t h_ioctl,
- IN ual_ndi_req_cm_ioctl_in_t *p_req )
-{
- ib_user_query_t user_query;
- ib_service_record_t service_record;
- ib_query_req_t query_req;
- ib_api_status_t status;
- NTSTATUS nt_status = STATUS_PENDING;
-
- AL_ENTER( AL_DBG_NDI );
-
- query_req.query_type = IB_QUERY_USER_DEFINED;
- query_req.p_query_input = &user_query;
- query_req.port_guid = p_req->guid;
- query_req.timeout_ms = g_sa_timeout;
- query_req.retry_cnt = g_sa_retries;
- query_req.flags = 0; /* IB_FLAGS_SYNC */
- query_req.query_context = p_req;
- query_req.pfn_query_cb = __ndi_ats_query_cb;
-
- /* TODO: which method one is correct? */
- user_query.method = IB_MAD_METHOD_GETTABLE;
- //user_query.method = IB_MAD_METHOD_GET;
- user_query.attr_id = IB_MAD_ATTR_SERVICE_RECORD;
- user_query.attr_size = sizeof(ib_service_record_t);
- user_query.comp_mask =
- IB_SR_COMPMASK_SPKEY |
- IB_SR_COMPMASK_SLEASE |
- IB_SR_COMPMASK_SNAME |
- IB_SR_COMPMASK_SDATA8_12 |
- IB_SR_COMPMASK_SDATA8_13 | IB_SR_COMPMASK_SDATA8_14 | IB_SR_COMPMASK_SDATA8_15;
-
- user_query.p_attr = &service_record;
-
- memset( &service_record, 0, sizeof(service_record) );
- service_record.service_pkey = cl_hton16( IB_DEFAULT_PKEY );
- service_record.service_data8[12] = (uint8_t) (p_req->pdata.dst_ip_addr[3] >> 0);
- service_record.service_data8[13] = (uint8_t) (p_req->pdata.dst_ip_addr[3] >> 8);
- service_record.service_data8[14] = (uint8_t) (p_req->pdata.dst_ip_addr[3] >> 16);
- service_record.service_data8[15] = (uint8_t) (p_req->pdata.dst_ip_addr[3] >> 24);
- service_record.service_lease = 0xFFFFFFFF;
- strcpy( (void*)service_record.service_name, ATS_NAME );
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("ATS:: MAD: method %#x, attr_id %#hx, Service: comp_mask %#I64x, IP %d.%d.%d.%d \n",
- user_query.method,
- CL_NTOH16(user_query.attr_id),
- user_query.comp_mask,
- service_record.service_data8[12],
- service_record.service_data8[13],
- service_record.service_data8[14],
- service_record.service_data8[15]
- ) );
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("h_qp %#I64x, uhdl %#I64x, ref_cnt %d\n",
- (uint64_t)h_qp, h_qp->obj.hdl, h_qp->obj.ref_cnt ) );
-
- // preserve QP for the life of this IOCTL
- ref_al_obj( &h_qp->obj ); /* take IRP life reference */
-
- /* insert IRP in the queue */
- IoCsqInsertIrp( &h_qp->p_irp_que->csq, h_ioctl, NULL );
-
- /* prevent destroying QP after cancelling of the IRP and before ib_query calback*/
- ref_al_obj( &h_qp->obj ); /* take IRP SA reference */
-
- /* query SA */
- status = ib_query( qp_get_al( h_qp ), &query_req, &h_qp->p_irp_que->h_query );
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("ib_query failed (%d)\n", status) );
- deref_al_obj( &h_qp->obj ); /* release IRP SA reference */
- h_ioctl = IoCsqRemoveNextIrp( &h_qp->p_irp_que->csq, NULL );
- if ( h_ioctl )
- { /* IOCTL should be released in the caller */
- deref_al_obj( &h_qp->obj ); /* release IRP life reference */
- nt_status = STATUS_DRIVER_INTERNAL_ERROR;
- }
- }
-
- AL_EXIT( AL_DBG_NDI );
- return nt_status;
-}
-
-NTSTATUS
-ndi_req_cm(
- IN ib_qp_handle_t const h_qp,
- IN cl_ioctl_handle_t h_ioctl,
- IN ual_ndi_req_cm_ioctl_in_t *p_req
- )
-{
- NTSTATUS nt_status;
-
- AL_ENTER( AL_DBG_NDI );
-
- /* check outstanding request */
- __ndi_acquire_lock( &h_qp->p_irp_que->csq, NULL);
- if ( h_qp->p_irp_que->h_ioctl )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("STATUS_CONNECTION_ACTIVE: h_qp %#I64x, uhdl %#I64x, ref_cnt %d\n",
- (uint64_t)h_qp, h_qp->obj.hdl, h_qp->obj.ref_cnt ) );
- nt_status = STATUS_CONNECTION_ACTIVE;
- __ndi_release_lock( &h_qp->p_irp_que->csq, 0 );
- goto exit;
- }
- if ( h_qp->p_irp_que->state != NDI_CM_IDLE )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("STATUS_INVALID_DEVICE_STATE: h_qp %I64x, ref_cnt %d, state %d\n",
- (uint64_t)h_qp, h_qp->obj.ref_cnt, h_qp->p_irp_que->state ) );
- nt_status = STATUS_INVALID_DEVICE_STATE;
- __ndi_release_lock( &h_qp->p_irp_que->csq, 0 );
- goto exit;
- }
- h_qp->p_irp_que->h_ioctl = h_ioctl; /* mark IRP as present */
- h_qp->p_irp_que->state = NDI_CM_CONNECTING_ATS_SENT;
- __ndi_release_lock( &h_qp->p_irp_que->csq, 0 );
-
- /* send ATS request */
- nt_status = __ndi_ats_query( h_qp, h_ioctl, p_req );
-
-exit:
- if ( nt_status != STATUS_PENDING )
- {
- cl_free( p_req );
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- h_qp->p_irp_que->h_ioctl = NULL; /* mark IRP as present */
- cl_ioctl_complete( h_ioctl, nt_status, 0 );
- }
- AL_EXIT( AL_DBG_NDI );
- return STATUS_PENDING;
-}
-
-
-/*******************************************************************
- *
- * RTU CM request
- *
- ******************************************************************/
-
-static void
-__ndi_rtu_cm(
- IN DEVICE_OBJECT* p_dev_obj,
- IN PIRP p_irp )
-{
- NTSTATUS nt_status;
- ib_qp_mod_t qp_mod;
- ib_api_status_t status;
- ib_qp_handle_t h_qp = p_irp->Tail.Overlay.DriverContext[0];
- ual_ndi_rtu_cm_ioctl_in_t *p_rtu =
- (ual_ndi_rtu_cm_ioctl_in_t*)cl_ioctl_in_buf( p_irp );
- uint8_t pdata[IB_REJ_PDATA_SIZE];
- uint8_t psize = sizeof(pdata);
-
- UNUSED_PARAM(p_dev_obj);
-
- AL_ENTER( AL_DBG_NDI );
-
- /* free the work item if any */
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- IoFreeWorkItem( p_irp->Tail.Overlay.DriverContext[1] );
-
- /* change the QP state to RTS */
- status = __ndi_qp2rts( h_qp, p_rtu->init_depth,
- p_rtu->resp_res, p_irp, &qp_mod );
- if ( status != IB_SUCCESS )
- {
- goto err;
- }
-
- /* send RTU */
- al_cep_get_pdata( qp_get_al( h_qp ), ((al_conn_qp_t*)h_qp)->cid, &psize, pdata );
- status = al_cep_rtu( qp_get_al( h_qp ), ((al_conn_qp_t*)h_qp)->cid, pdata, psize );
- if( status != IB_SUCCESS && status != IB_INVALID_STATE )
- {
- net32_t cid;
-err:
- /* Reject and abort the connection. */
- al_cep_rej(
- qp_get_al( h_qp ), ((al_conn_qp_t*)h_qp)->cid,
- IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );
-
- __cep_timewait_qp( h_qp );
-
- cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
-
- if( cid != AL_INVALID_CID )
- {
- ref_al_obj( &h_qp->obj );
- if( al_destroy_cep( qp_get_al( h_qp ), cid, deref_al_obj ) != IB_SUCCESS )
- {
- deref_al_obj( &h_qp->obj );
- }
- }
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("al_cep_rtu returned %s.\n", ib_get_err_str( status )) );
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- nt_status = STATUS_DRIVER_INTERNAL_ERROR;
- p_irp->IoStatus.Information = 0;
- goto exit;
- }
-
- p_irp->IoStatus.Information = sizeof(uint32_t);;
- h_qp->p_irp_que->state = NDI_CM_CONNECTED;
- nt_status = STATUS_SUCCESS;
-
-exit:
- /* release the reference only for async case */
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- deref_al_obj( &h_qp->obj );
-
- p_irp->IoStatus.Status = nt_status;
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );
- AL_EXIT( AL_DBG_NDI );
-}
-
-cl_status_t
-ndi_rtu_cm(
- IN ib_qp_handle_t const h_qp,
- IN PIRP p_irp
- )
-{
- IO_STACK_LOCATION *p_io_stack;
-
- AL_ENTER( AL_DBG_NDI );
-
- p_irp->Tail.Overlay.DriverContext[0] = h_qp;
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
- p_irp->Tail.Overlay.DriverContext[1] = IoAllocateWorkItem( p_io_stack->DeviceObject );
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- { /* asyncronous performing */
- /* take a ref to prevent QP destroy before calling work item */
- ref_al_obj( &h_qp->obj );
- IoMarkIrpPending( p_irp );
- IoQueueWorkItem( p_irp->Tail.Overlay.DriverContext[1],
- __ndi_rtu_cm, DelayedWorkQueue, p_irp );
- }
- else
- { /* syncronous performing */
- p_irp->Tail.Overlay.DriverContext[1] = NULL;
- __ndi_rtu_cm( p_io_stack->DeviceObject, p_irp );
- }
-
- AL_EXIT( AL_DBG_NDI );
- return CL_PENDING;
-}
-
-
-/*******************************************************************
- *
- * REP CM request
- *
- ******************************************************************/
-
-static void
-__ndi_rep_cm(
- IN DEVICE_OBJECT* p_dev_obj,
- IN PIRP p_irp )
-{
- NTSTATUS nt_status;
- ib_qp_mod_t qp_mod;
- ib_api_status_t status;
- ib_qp_handle_t h_qp = p_irp->Tail.Overlay.DriverContext[0];
- ual_ndi_rep_cm_ioctl_in_t *p_rep =
- (ual_ndi_rep_cm_ioctl_in_t*)cl_ioctl_in_buf( p_irp );
-
- UNUSED_PARAM(p_dev_obj);
-
- AL_ENTER( AL_DBG_NDI );
-
- /* free the work item if any */
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- IoFreeWorkItem( p_irp->Tail.Overlay.DriverContext[1] );
-
- /* change the QP state to RTS */
- status = __ndi_qp2rts( h_qp, p_rep->init_depth,
- p_rep->resp_res, p_irp, &qp_mod );
- if ( status != IB_SUCCESS )
- {
- nt_status = STATUS_CONNECTION_ABORTED;
- goto err;
- }
-
- /* send REP */
- status = al_cep_send_rep ( qp_get_al( h_qp ), p_rep->cid );
- if( status != IB_SUCCESS )
- {
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_send_rep returned %s\n", ib_get_err_str(status)) );
- if (status == IB_INSUFFICIENT_RESOURCES)
- nt_status = STATUS_INSUFFICIENT_RESOURCES;
- else
- nt_status = STATUS_CONNECTION_ABORTED;
-err:
- cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
-
- /* Reject and abort the connection. */
- al_cep_rej( qp_get_al( h_qp ), p_rep->cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );
-
- /* transit QP to error state */
- __cep_timewait_qp( h_qp );
-
- ref_al_obj( &h_qp->obj );
- if( al_destroy_cep( qp_get_al( h_qp ), p_rep->cid, deref_al_obj ) != IB_SUCCESS )
- deref_al_obj( &h_qp->obj );
-
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_rtu returned %s.\n", ib_get_err_str( status )) );
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- p_irp->IoStatus.Information = 0;
- goto exit;
- }
-
- p_irp->IoStatus.Information = cl_ioctl_out_size( p_irp );
- h_qp->p_irp_que->state = NDI_CM_CONNECTED;
- nt_status = STATUS_SUCCESS;
-
-exit:
- /* release the reference only for async case */
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- deref_al_obj( &h_qp->obj );
-
- p_irp->IoStatus.Status = nt_status;
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static void
-__ndi_fill_cm_rep(
- IN ib_qp_handle_t const h_qp,
- IN ual_ndi_rep_cm_ioctl_in_t *p_rep,
- OUT ib_cm_rep_t *p_cm_rep)
-{
- AL_ENTER( AL_DBG_NDI );
-
- memset( p_cm_rep, 0, sizeof(ib_cm_rep_t) );
-
- p_cm_rep->p_rep_pdata = p_rep->pdata;
- p_cm_rep->rep_length = sizeof(p_rep->pdata);
-
- p_cm_rep->qp_type = IB_QPT_RELIABLE_CONN;
- p_cm_rep->h_qp = h_qp;
-
- p_cm_rep->access_ctrl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE;
- p_cm_rep->init_depth = 0;
- p_cm_rep->target_ack_delay = 10;
- p_cm_rep->failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED;
- p_cm_rep->flow_ctrl = TRUE; /* HCAs must support end-to-end flow control. */
- p_cm_rep->rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT;
- p_cm_rep->rnr_retry_cnt = QP_ATTRIB_RNR_RETRY;
-
- AL_EXIT( AL_DBG_NDI );
-}
-
-NTSTATUS
-ndi_rep_cm(
- IN ib_qp_handle_t const h_qp,
- IN net32_t const cid,
- IN PIRP p_irp,
- IN ual_ndi_rep_cm_ioctl_in_t *p_rep
- )
-{
- IO_STACK_LOCATION *p_io_stack;
- ib_cm_rep_t cm_rep;
- ib_qp_mod_t qp_mod;
- net32_t old_cid;
- ib_api_status_t status;
- NTSTATUS nt_status;
-
- AL_ENTER( AL_DBG_NDI );
-
- /* Format ib_cm_req_t structure */
- __ndi_fill_cm_rep( h_qp, p_rep, &cm_rep );
-
- /* prepare Passive CEP for connection */
- status = al_cep_pre_rep_ex( qp_get_al( h_qp ), cid, __ndi_cm_handler, h_qp, &cm_rep, &qp_mod );
- if( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("al_cep_pre_rep_ex returned %s.\n", ib_get_err_str( status )) );
- switch (status)
- {
- case IB_INVALID_HANDLE:
- nt_status = STATUS_CANCELLED;
- break;
- case IB_INVALID_STATE:
- nt_status = STATUS_CONNECTION_ABORTED;
- break;
- default:
- nt_status = STATUS_INSUFFICIENT_RESOURCES;
- break;
- }
- goto err_cep_destroy;
- }
-
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_NDI,
- ("Prepared Passive CEP with cid %d, h_al %p, context %p\n",
- cid, qp_get_al( h_qp ), h_qp ) );
-
- /* See if this QP has already been connected. */
- old_cid = cl_atomic_comp_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID, cid );
- if( old_cid != AL_INVALID_CID )
- {
- nt_status = STATUS_CONNECTION_ACTIVE;
- goto err_cep_destroy;
- }
-
- /* transfer work to a work the thread */
- p_irp->Tail.Overlay.DriverContext[0] = h_qp;
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
- p_irp->Tail.Overlay.DriverContext[1] = IoAllocateWorkItem( p_io_stack->DeviceObject );
- if ( p_irp->Tail.Overlay.DriverContext[1] )
- { /* asyncronous performing */
- /* take a ref to prevent QP destroy before calling work item */
- ref_al_obj( &h_qp->obj );
- IoMarkIrpPending( p_irp );
- IoQueueWorkItem( p_irp->Tail.Overlay.DriverContext[1],
- __ndi_rep_cm, DelayedWorkQueue, p_irp );
- }
- else
- { /* syncronous performing */
- __ndi_rep_cm( p_io_stack->DeviceObject, p_irp );
- }
- goto exit;
-
-err_cep_destroy:
- al_destroy_cep( qp_get_al( h_qp ), cid, NULL );
-
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- cl_ioctl_complete( p_irp, nt_status, 0 );
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return CL_PENDING;
-}
-
-
-/*******************************************************************
- *
- * DREQ CM request
- *
- ******************************************************************/
-
-NTSTATUS
-ndi_dreq_cm(
- IN ib_qp_handle_t const h_qp,
- IN PIRP p_irp
- )
-{
- ib_qp_mod_t qp_mod;
- net32_t cid;
- ib_api_status_t status;
- NTSTATUS nt_status;
- uint64_t timewait_us;
-
- AL_ENTER( AL_DBG_NDI );
-
- status = al_cep_get_timewait( qp_get_al( h_qp ),
- ((al_conn_qp_t*)h_qp)->cid, &timewait_us );
-
- if (status != IB_SUCCESS)
- {
- nt_status = STATUS_CONNECTION_INVALID;
- goto exit;
- }
-
- /* bring QP to error state */
- cl_memclr( &qp_mod, sizeof(qp_mod) );
- qp_mod.req_state = IB_QPS_ERROR;
-
- status = ndi_modify_qp( h_qp, &qp_mod,
- cl_ioctl_out_size( p_irp ), cl_ioctl_out_buf( p_irp ) );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ndi_modify_qp to ERROR returned %s.\n", ib_get_err_str(status) ) );
- nt_status = STATUS_CONNECTION_INVALID;
- goto exit;
- }
-
- /* Store the timestamp after which the QP exits timewait. */
- h_qp->timewait = cl_get_time_stamp() + timewait_us;
-
- cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );
-
- if( cid != AL_INVALID_CID )
- {
- ref_al_obj( &h_qp->obj );
- if( al_destroy_cep( qp_get_al( h_qp ), cid, deref_al_obj ) != IB_SUCCESS )
- {
- deref_al_obj( &h_qp->obj );
- }
- }
-
- nt_status = STATUS_SUCCESS;
-
-exit:
- h_qp->p_irp_que->state = NDI_CM_IDLE;
- cl_ioctl_complete( p_irp, nt_status, cl_ioctl_out_size( p_irp ) );
-
- AL_EXIT( AL_DBG_NDI );
- return STATUS_EVENT_DONE; /* CL_COMPLETED */
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al_proxy.h 33 2005-07-11 19:51:17Z ftillier $
- */
-
-/*
- * Abstract:
- * This header file defines data structures for the kernel-mode NDI support
- *
- * Environment:
- * Kernel .
- */
-
-
-#ifndef _AL_NDI_CM_H_
-#define _AL_NDI_CM_H_
-
-#include "complib/cl_ioctl_osd.h"
-
-/* QP creation parameters */
-#define QP_ATTRIB_RESPONDER_RESOURCES 4
-#define QP_ATTRIB_INITIATOR_DEPTH 4
-#define QP_ATTRIB_RETRY_COUNT 6
-#define QP_ATTRIB_RNR_RETRY 7
-#define QP_ATTRIB_RNR_NAK_TIMEOUT 8 /* 16 ms */
-
-#define QP_ATTRIB_SQ_DEPTH 16
-
-/* CM timeouts */
-#define CM_MIN_LOCAL_TIMEOUT (18)
-#define CM_LOCAL_TIMEOUT (1)
-#define CM_MIN_REMOTE_TIMEOUT (18)
-#define CM_REMOTE_TIMEOUT (2)
-#define CM_RETRIES 4
-
-typedef enum _ndi_cm_state
-{
- NDI_CM_IDLE,
- NDI_CM_CONNECTING_ATS_SENT, // ATS = Address Translation Service
- NDI_CM_CONNECTING_QPR_SENT, // QPR = Query path record
- NDI_CM_CONNECTING_REQ_SENT,
- NDI_CM_CONNECTING_REP_RCVD,
- NDI_CM_CONNECTING_REJ_RCVD,
- NDI_CM_CONNECTED,
- NDI_CM_BOUND,
- NDI_CM_LISTENING,
- NDI_CM_REP_SENT,
- NDI_CM_CONNECTED_DREP_SENT,
- NDI_CM_CONNECTED_DREQ_SENT,
-} ndi_cm_state_t;
-
-typedef struct _ib_qp ib_qp_t;
-
-typedef struct _ndi_qp_csq
-{
- IO_CSQ csq;
- ib_qp_t* h_qp;
- ib_query_handle_t h_query;
- LIST_ENTRY que;
- ndi_cm_state_t state;
- PIO_WORKITEM p_workitem;
- PIRP h_ioctl;
-} ndi_qp_csq_t;
-
-ib_api_status_t
-ndi_modify_qp(
- IN const ib_qp_handle_t h_qp,
- IN const ib_qp_mod_t* const p_qp_mod,
- IN const uint32_t buf_size,
- IN uint8_t* const p_outbuf);
-
-NTSTATUS
-ndi_req_cm(
- IN ib_qp_handle_t const h_qp,
- IN cl_ioctl_handle_t h_ioctl,
- IN ual_ndi_req_cm_ioctl_in_t *p_req
- );
-
-NTSTATUS
-ndi_rep_cm(
- IN ib_qp_handle_t const h_qp,
- IN net32_t const cid,
- IN cl_ioctl_handle_t h_ioctl,
- IN ual_ndi_rep_cm_ioctl_in_t *p_ndi_rep_cm
- );
-
-cl_status_t
-ndi_rtu_cm(
- IN ib_qp_handle_t const h_qp,
- IN PIRP p_irp
- );
-
-NTSTATUS
-ndi_dreq_cm(
- IN ib_qp_handle_t const h_qp,
- IN PIRP p_irp
- );
-
-void
-ndi_qp_flush_ques(
- IN ib_qp_handle_t h_qp );
-
-
-NTSTATUS
-ndi_qp_init(
- IN ib_qp_handle_t h_qp );
-
-void
-ndi_qp_destroy(
- IN ib_qp_handle_t h_qp );
-
-void
-ndi_qp_free(
- IN ib_qp_handle_t h_qp );
-
-#endif
-
+++ /dev/null
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al_proxy_verbs.c 548 2006-11-27 20:03:51Z leonidk $
- */
-
-
-#include <complib/comp_lib.h>
-#include <iba/ib_al.h>
-#include <iba/ib_al_ioctl.h>
-#include "al.h"
-#include "al_debug.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "al_ndi_cq.tmh"
-#endif
-
-#include "al_dev.h"
-/* Get the internal definitions of apis for the proxy */
-#include "al_ca.h"
-#include "al_cq.h"
-#include "ib_common.h"
-
-/*******************************************************************
- *
- * Helpers
- *
- ******************************************************************/
-
-#pragma warning(disable:4706)
-static inline void __ndi_flush_que(
- IN ndi_cq_csq_t* p_ndi_csq,
- IN NTSTATUS completion_code
- )
-{
- PIRP Irp;
- while( Irp = IoCsqRemoveNextIrp( &p_ndi_csq->csq, NULL ) )
- {
- cl_ioctl_complete( Irp, completion_code, 0 );
- deref_al_obj( &p_ndi_csq->h_cq->obj );
- }
-}
-#pragma warning(default:4706)
-
-/*******************************************************************
- *
- * Callbacks
- *
- ******************************************************************/
-
-#pragma warning(disable:4706)
-void ndi_cq_compl_cb(
- IN const ib_cq_handle_t h_cq,
- IN void *cq_context )
-{
- PIRP Irp;
- ndi_cq_csq_t*p_ndi_csq = &h_cq->compl;
- UNUSED_PARAM( cq_context );
-
- AL_ENTER( AL_DBG_NDI );
-
- while( Irp = IoCsqRemoveNextIrp( &p_ndi_csq->csq, NULL ) )
- {
- Irp->IoStatus.Status = STATUS_SUCCESS;
- Irp->IoStatus.Information = 0;
- IoCompleteRequest( Irp, IO_NETWORK_INCREMENT );
- deref_al_obj( &p_ndi_csq->h_cq->obj );
- }
-
- AL_EXIT( AL_DBG_NDI );
-}
-#pragma warning(default:4706)
-
-void ndi_cq_error_cb(
- IN ib_async_event_rec_t *p_err_rec)
-{
- ib_cq_handle_t h_cq = p_err_rec->handle.h_cq;
- AL_ENTER( AL_DBG_NDI );
- __ndi_flush_que( &h_cq->compl, STATUS_INTERNAL_ERROR );
- __ndi_flush_que( &h_cq->error, STATUS_INTERNAL_ERROR );
- AL_EXIT( AL_DBG_NDI );
-}
-
-/*******************************************************************
- *
- * Public routines
- *
- ******************************************************************/
-
-/* flush a queue of pending requests */
-void
-ndi_cq_flush_ques(
- IN ib_cq_handle_t h_cq
- )
-{
- AL_ENTER( AL_DBG_NDI );
- if ( h_cq->pfn_user_comp_cb == ndi_cq_compl_cb )
- {
- __ndi_flush_que( &h_cq->compl, STATUS_CANCELLED );
- __ndi_flush_que( &h_cq->error, STATUS_CANCELLED );
- }
- AL_EXIT( AL_DBG_NDI );
-}
-
-
-/*******************************************************************
- *
- * CSQ
- *
- ******************************************************************/
-
-static VOID __ndi_insert_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- ndi_cq_csq_t *p_ndi_csq = (ndi_cq_csq_t*)Csq;
-
- AL_ENTER( AL_DBG_NDI );
- InsertTailList( &p_ndi_csq->que, &Irp->Tail.Overlay.ListEntry );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_remove_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- UNUSED_PARAM( Csq );
-
- AL_ENTER( AL_DBG_NDI );
- RemoveEntryList( &Irp->Tail.Overlay.ListEntry );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static PIRP __ndi_peek_next_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp,
- IN PVOID PeekContext
- )
-{
- PIRP nextIrp = NULL;
- PLIST_ENTRY nextEntry;
- PLIST_ENTRY listHead;
- ndi_cq_csq_t *p_ndi_csq = (ndi_cq_csq_t*)Csq;
-
- AL_ENTER( AL_DBG_NDI );
-
- listHead = &p_ndi_csq->que;
-
- //
- // If the IRP is NULL, we will start peeking from the listhead, else
- // we will start from that IRP onwards. This is done under the
- // assumption that new IRPs are always inserted at the tail.
- //
-
- if(Irp == NULL)
- nextEntry = listHead->Flink;
- else
- nextEntry = Irp->Tail.Overlay.ListEntry.Flink;
-
- while(nextEntry != listHead) {
- nextIrp = CONTAINING_RECORD(nextEntry, IRP, Tail.Overlay.ListEntry);
-
- //
- // If context is present, continue until you find a matching one.
- // Else you break out as you got next one.
- //
-
- if(PeekContext)
- {
- /* for now PeekContext is not used */
- }
- else
- {
- break;
- }
-
- nextIrp = NULL;
- nextEntry = nextEntry->Flink;
- }
-
- AL_EXIT( AL_DBG_NDI );
- return nextIrp;
-}
-
-static VOID __ndi_acquire_lock(
- IN PIO_CSQ Csq,
- OUT PKIRQL Irql
- )
-{
- ndi_cq_csq_t *p_ndi_csq = (ndi_cq_csq_t*)Csq;
- ib_cq_handle_t h_cq = p_ndi_csq->h_cq;
- UNUSED_PARAM( Irql );
-
- AL_ENTER( AL_DBG_NDI );
- cl_spinlock_acquire( &h_cq->obj.lock );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_release_lock(
- IN PIO_CSQ Csq,
- IN KIRQL Irql
- )
-{
- ndi_cq_csq_t *p_ndi_csq = (ndi_cq_csq_t*)Csq;
- ib_cq_handle_t h_cq = p_ndi_csq->h_cq;
- UNUSED_PARAM( Irql );
-
- AL_ENTER( AL_DBG_NDI );
- cl_spinlock_release( &h_cq->obj.lock );
- AL_EXIT( AL_DBG_NDI );
-}
-
-static VOID __ndi_complete_cancelled_irp(
- IN PIO_CSQ Csq,
- IN PIRP Irp
- )
-{
- ndi_cq_csq_t *p_ndi_csq = (ndi_cq_csq_t*)Csq;
- ib_cq_handle_t h_cq = p_ndi_csq->h_cq;
-
- AL_ENTER( AL_DBG_NDI );
- cl_ioctl_complete( Irp, CL_CANCELED, 0 );
- deref_al_obj( &h_cq->obj );
- AL_EXIT( AL_DBG_NDI );
-}
-
-NTSTATUS
-ndi_cq_init(
- IN ib_cq_handle_t h_cq )
-{
-
- NTSTATUS status;
-
- AL_ENTER( AL_DBG_NDI );
-
- status = IoCsqInitialize( &h_cq->compl.csq,
- __ndi_insert_irp, __ndi_remove_irp,
- __ndi_peek_next_irp, __ndi_acquire_lock,
- __ndi_release_lock, __ndi_complete_cancelled_irp );
- if ( !NT_SUCCESS( status ) )
- goto exit;
-
- status = IoCsqInitialize( &h_cq->error.csq,
- __ndi_insert_irp, __ndi_remove_irp,
- __ndi_peek_next_irp, __ndi_acquire_lock,
- __ndi_release_lock, __ndi_complete_cancelled_irp );
- if ( !NT_SUCCESS( status ) )
- goto exit;
-
- InitializeListHead( &h_cq->compl.que );
- InitializeListHead( &h_cq->error.que );
- h_cq->compl.h_cq = h_cq;
- h_cq->error.h_cq = h_cq;
- status = STATUS_SUCCESS;
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return status;
-}
-
-
-
+++ /dev/null
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al_proxy.h 33 2005-07-11 19:51:17Z ftillier $
- */
-
-/*
- * Abstract:
- * This header file defines data structures for the kernel-mode NDI support
- *
- * Environment:
- * Kernel .
- */
-
-
-#ifndef _AL_NDI_CQ_H_
-#define _AL_NDI_CQ_H_
-
-void
-ndi_cq_flush_ques(
- IN ib_cq_handle_t h_cq );
-
-NTSTATUS
-ndi_cq_init(
- IN ib_cq_handle_t h_cq );
-
-void
-ndi_cq_compl_cb(
- IN const ib_cq_handle_t h_cq,
- IN void *cq_context );
-
-void
-ndi_cq_error_cb(
- IN ib_async_event_rec_t *p_err_rec );
-
-#endif
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_pnp.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-#include <iba/ib_al.h>\r
-\r
-#include "al.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_pnp.tmh"\r
-#endif\r
-#include "al_mgr.h"\r
-#include "al_pnp.h"\r
-#include "ib_common.h"\r
-#include "al_ioc_pnp.h"\r
-\r
-\r
-#define PNP_CA_VECTOR_MIN 0\r
-#define PNP_CA_VECTOR_GROW 10\r
-\r
-\r
-/* ib_pnp_event_t values converted to text strings. */\r
-char* ib_pnp_event_str[] =\r
-{\r
- "IB_PNP_CA_ADD",\r
- "IB_PNP_CA_REMOVE",\r
- "IB_PNP_PORT_ADD",\r
- "IB_PNP_PORT_REMOVE",\r
- "IB_PNP_PORT_INIT",\r
- "IB_PNP_PORT_ARMED",\r
- "IB_PNP_PORT_ACTIVE",\r
- "IB_PNP_PORT_DOWN",\r
- "IB_PNP_PKEY_CHANGE",\r
- "IB_PNP_SM_CHANGE",\r
- "IB_PNP_GID_CHANGE",\r
- "IB_PNP_LID_CHANGE",\r
- "IB_PNP_SUBNET_TIMEOUT_CHANGE",\r
- "IB_PNP_IOU_ADD",\r
- "IB_PNP_IOU_REMOVE",\r
- "IB_PNP_IOC_ADD",\r
- "IB_PNP_IOC_REMOVE",\r
- "IB_PNP_IOC_PATH_ADD",\r
- "IB_PNP_IOC_PATH_REMOVE"\r
-};\r
-\r
-\r
-/*\r
- * Declarations.\r
- */\r
-static void\r
-__pnp_free(\r
- IN al_obj_t *p_obj );\r
-\r
-\r
-/*\r
- * Compares two context for inserts/lookups in a flexi map. Keys are the\r
- * address of the reg guid1, which is adjacent to the context guid2 (if exist).\r
- * This allows for a single call to cl_memcmp.\r
- */\r
-static intn_t\r
-__context_cmp128(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 )\r
-{\r
- return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) * 2 );\r
-}\r
-\r
-/*\r
- * Compares two context for inserts/lookups in a flexi map. Keys are the\r
- * address of the reg guid1, which is adjacent to the context guid2 (if exist).\r
- * This allows for a single call to cl_memcmp.\r
- */\r
-static intn_t\r
-__context_cmp64(\r
- IN const void* const p_key1,\r
- IN const void* const p_key2 )\r
-{\r
- return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) );\r
-}\r
-\r
-\r
-/*\r
- * Event structures for queuing to the async proc manager.\r
- */\r
-typedef struct _al_pnp_ca_change\r
-{\r
- cl_async_proc_item_t async_item;\r
- al_ci_ca_t *p_ci_ca;\r
- ib_ca_attr_t *p_new_ca_attr;\r
-\r
-} al_pnp_ca_change_t;\r
-\r
-\r
-typedef struct _al_pnp_ca_event\r
-{\r
- cl_async_proc_item_t async_item;\r
- ib_pnp_event_t pnp_event;\r
- al_ci_ca_t *p_ci_ca;\r
- uint8_t port_index;\r
-\r
-} al_pnp_ca_event_t;\r
-\r
-\r
-typedef struct _al_pnp_reg_event\r
-{\r
- cl_async_proc_item_t async_item;\r
- al_pnp_t *p_reg;\r
-\r
-} al_pnp_reg_event_t;\r
-\r
-\r
-/* PnP Manager structure. */\r
-typedef struct _al_pnp_mgr\r
-{\r
- al_obj_t obj;\r
-\r
- cl_qlist_t ca_reg_list;\r
- cl_qlist_t port_reg_list;\r
-\r
- cl_ptr_vector_t ca_vector;\r
-\r
- cl_async_proc_item_t async_item;\r
- boolean_t async_item_is_busy;\r
-\r
-} al_pnp_mgr_t;\r
-\r
-\r
-/*\r
- * PnP Manager instance, creation, destruction.\r
- */\r
-\r
-/* Global instance of the PnP manager. */\r
-al_pnp_mgr_t *gp_pnp = NULL;\r
-\r
-\r
-static void\r
-__pnp_check_events(\r
- IN cl_async_proc_item_t* p_item );\r
-\r
-static void\r
-__al_pnp_process_dereg(\r
- IN cl_async_proc_item_t* p_item );\r
-\r
-\r
-ib_api_status_t\r
-create_pnp(\r
- IN al_obj_t* const p_parent_obj )\r
-{\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( gp_pnp == NULL );\r
-\r
- gp_pnp = (al_pnp_mgr_t*)cl_zalloc( sizeof(al_pnp_mgr_t) );\r
- if( !gp_pnp )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to allocate PnP manager.\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- cl_qlist_init( &gp_pnp->ca_reg_list );\r
- cl_qlist_init( &gp_pnp->port_reg_list );\r
- construct_al_obj( &gp_pnp->obj, AL_OBJ_TYPE_PNP_MGR );\r
- cl_ptr_vector_construct( &gp_pnp->ca_vector );\r
-\r
- cl_status = cl_ptr_vector_init( &gp_pnp->ca_vector, PNP_CA_VECTOR_MIN,\r
- PNP_CA_VECTOR_GROW );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- __pnp_free( &gp_pnp->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_ptr_vector_init failed with status %#x.\n",\r
- cl_status) );\r
- return IB_ERROR;\r
- }\r
-\r
- gp_pnp->async_item.pfn_callback = __pnp_check_events;\r
-\r
- status = init_al_obj( &gp_pnp->obj, NULL, TRUE, NULL, NULL, __pnp_free );\r
- if( status != IB_SUCCESS )\r
- {\r
- __pnp_free( &gp_pnp->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
- status = attach_al_obj( p_parent_obj, &gp_pnp->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- gp_pnp->obj.pfn_destroy( &gp_pnp->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &gp_pnp->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return( IB_SUCCESS );\r
-}\r
-\r
-\r
-static void\r
-__pnp_free(\r
- IN al_obj_t *p_obj )\r
-{\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( PARENT_STRUCT( p_obj, al_pnp_mgr_t, obj ) == gp_pnp );\r
- CL_ASSERT( cl_is_qlist_empty( &gp_pnp->ca_reg_list ) );\r
- CL_ASSERT( cl_is_qlist_empty( &gp_pnp->port_reg_list ) );\r
- UNUSED_PARAM( p_obj );\r
-\r
- /* All CA's should have been removed by now. */\r
- CL_ASSERT( !cl_ptr_vector_get_size( &gp_pnp->ca_vector ) );\r
- cl_ptr_vector_destroy( &gp_pnp->ca_vector );\r
-\r
- destroy_al_obj( &gp_pnp->obj );\r
- cl_free( gp_pnp );\r
- gp_pnp = NULL;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__pnp_reg_destroying(\r
- IN al_obj_t *p_obj )\r
-{\r
- al_pnp_t *p_reg;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj );\r
-\r
- /* Reference the registration entry while we queue it to our PnP thread. */\r
- ref_al_obj( &p_reg->obj );\r
-\r
- /* Queue the registration for removal from the list. */\r
- cl_async_proc_queue( gp_async_pnp_mgr, &p_reg->dereg_item );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__al_pnp_process_dereg(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- al_pnp_t* p_reg;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_reg = PARENT_STRUCT( p_item, al_pnp_t, dereg_item );\r
-\r
- /* Remove the registration information from the list. */\r
- switch( pnp_get_class( p_reg->pnp_class ) )\r
- {\r
- case IB_PNP_CA:\r
- cl_qlist_remove_item( &gp_pnp->ca_reg_list, &p_reg->list_item );\r
- break;\r
-\r
- case IB_PNP_PORT:\r
- cl_qlist_remove_item( &gp_pnp->port_reg_list, &p_reg->list_item );\r
- break;\r
-\r
- default:\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ||\r
- pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid PnP registartion type.\n") );\r
- }\r
-\r
- /* Release the reference we took for processing the deregistration. */\r
- deref_al_obj( &p_reg->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__pnp_reg_cleanup(\r
- IN al_obj_t *p_obj )\r
-{\r
- al_pnp_t *p_reg;\r
- cl_fmap_item_t *p_item;\r
- IRP *p_irp;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj );\r
-\r
- /* Cleanup the context list. */\r
- while( cl_fmap_count( &p_reg->context_map ) )\r
- {\r
- p_item = cl_fmap_tail( &p_reg->context_map );\r
- cl_fmap_remove_item( &p_reg->context_map, p_item );\r
- cl_free( p_item );\r
- }\r
-\r
- p_irp = InterlockedExchangePointer( &p_reg->p_rearm_irp, NULL );\r
- if( p_irp )\r
- {\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = STATUS_CANCELLED;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- }\r
-\r
- if( p_reg->p_dereg_irp )\r
- {\r
- p_reg->p_dereg_irp->IoStatus.Status = STATUS_SUCCESS;\r
- p_reg->p_dereg_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_reg->p_dereg_irp, IO_NO_INCREMENT );\r
- p_reg->p_dereg_irp = NULL;\r
- }\r
-\r
- /* Dereference the PnP manager. */\r
- deref_al_obj( &gp_pnp->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__pnp_reg_free(\r
- IN al_obj_t *p_obj )\r
-{\r
- al_pnp_t *p_reg;\r
- cl_fmap_item_t *p_item;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj );\r
-\r
- /* Cleanup the context list. */\r
- while( cl_fmap_count( &p_reg->context_map ) )\r
- {\r
- p_item = cl_fmap_tail( &p_reg->context_map );\r
- cl_fmap_remove_item( &p_reg->context_map, p_item );\r
- cl_free( p_item );\r
- }\r
-\r
- /* Free the registration structure. */\r
- destroy_al_obj( &p_reg->obj );\r
- cl_free( p_reg );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Helper functions.\r
- */\r
-\r
-\r
-\r
-/*\r
- * Returns the context structure stored in a registration for\r
- * a given CA or port GUID.\r
- */\r
-al_pnp_context_t*\r
-pnp_get_context(\r
- IN const al_pnp_t* const p_reg,\r
- IN const void* const p_key )\r
-{\r
- cl_fmap_item_t *p_context_item;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Search the context list for this CA. */\r
- p_context_item = cl_fmap_get( &p_reg->context_map, p_key );\r
- if( p_context_item != cl_fmap_end( &p_reg->context_map ) )\r
- {\r
- AL_EXIT( AL_DBG_PNP );\r
- return PARENT_STRUCT( p_context_item, al_pnp_context_t, map_item );\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return NULL;\r
-}\r
-\r
-\r
-void\r
-pnp_reg_complete(\r
- IN al_pnp_t* const p_reg )\r
-{\r
- ib_pnp_rec_t user_rec;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Notify the user that the registration is complete. */\r
- if( (pnp_get_flag( p_reg->pnp_class ) & IB_PNP_FLAG_REG_COMPLETE) )\r
- {\r
- /* Setup the PnP record for the callback. */\r
- cl_memclr( &user_rec, sizeof(user_rec) );\r
- user_rec.h_pnp = p_reg;\r
- user_rec.pnp_event = IB_PNP_REG_COMPLETE;\r
- user_rec.pnp_context = (void*)p_reg->obj.context;\r
-\r
- /* Invoke the user callback. */\r
- p_reg->pfn_pnp_cb( &user_rec );\r
- }\r
-\r
- if( pnp_get_flag( p_reg->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
- {\r
- KeSetEvent( p_reg->p_sync_event, 0, FALSE );\r
- /*\r
- * Proxy synchronizes PnP callbacks with registration, and thus can\r
- * safely set the UM_EXPORT subtype after al_reg_pnp returns.\r
- */\r
- if( p_reg->obj.type & AL_OBJ_SUBTYPE_UM_EXPORT )\r
- ObDereferenceObject( p_reg->p_sync_event );\r
- p_reg->p_sync_event = NULL;\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-/*\r
- * User notification. Formats the PnP record delivered by the\r
- * callback, invokes the callback, and updates the contexts.\r
- */\r
-static ib_api_status_t\r
-__pnp_notify_user(\r
- IN al_pnp_t* const p_reg,\r
- IN al_pnp_context_t* const p_context,\r
- IN const al_pnp_ca_event_t* const p_event_rec )\r
-{\r
- ib_api_status_t status;\r
- union\r
- {\r
- ib_pnp_rec_t user_rec;\r
- ib_pnp_ca_rec_t ca_rec;\r
- ib_pnp_port_rec_t port_rec;\r
- } u;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( p_reg );\r
- CL_ASSERT( p_context );\r
- CL_ASSERT( p_event_rec );\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
- ("p_event_rec->pnp_event = 0x%x (%s)\n",\r
- p_event_rec->pnp_event, ib_get_pnp_event_str( p_event_rec->pnp_event )) );\r
-\r
- /* Setup the PnP record for the callback. */\r
- cl_memclr( &u, sizeof(u) );\r
- u.user_rec.h_pnp = p_reg;\r
- u.user_rec.pnp_event = p_event_rec->pnp_event;\r
- u.user_rec.pnp_context = (void*)p_reg->obj.context;\r
- u.user_rec.context = (void*)p_context->context;\r
-\r
- switch( p_event_rec->pnp_event )\r
- {\r
- case IB_PNP_CA_ADD:\r
- /* Copy the attributes for use in calling users back. */\r
- u.ca_rec.p_ca_attr = ib_copy_ca_attr(\r
- p_event_rec->p_ci_ca->p_user_attr,\r
- p_event_rec->p_ci_ca->p_pnp_attr );\r
-\r
- /* Fall through */\r
- case IB_PNP_CA_REMOVE:\r
- u.user_rec.guid = p_event_rec->p_ci_ca->p_pnp_attr->ca_guid;\r
- break;\r
-\r
- case IB_PNP_PORT_ADD:\r
- case IB_PNP_PORT_INIT:\r
- case IB_PNP_PORT_ARMED:\r
- case IB_PNP_PORT_ACTIVE:\r
- case IB_PNP_PORT_DOWN:\r
- case IB_PNP_PKEY_CHANGE:\r
- case IB_PNP_SM_CHANGE:\r
- case IB_PNP_GID_CHANGE:\r
- case IB_PNP_LID_CHANGE:\r
- case IB_PNP_SUBNET_TIMEOUT_CHANGE:\r
- /* Copy the attributes for use in calling users back. */\r
- u.port_rec.p_ca_attr = ib_copy_ca_attr(\r
- p_event_rec->p_ci_ca->p_user_attr,\r
- p_event_rec->p_ci_ca->p_pnp_attr );\r
-\r
- /* Setup the port attribute pointer. */\r
- u.port_rec.p_port_attr =\r
- &u.port_rec.p_ca_attr->p_port_attr[p_event_rec->port_index];\r
-\r
- /* Fall through */\r
- case IB_PNP_PORT_REMOVE:\r
- u.user_rec.guid = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr[ \r
- p_event_rec->port_index].port_guid;\r
- break;\r
-\r
- case IB_PNP_REG_COMPLETE:\r
- break;\r
-\r
- default:\r
- /* Invalid event type. */\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid event type (%d).\n", p_event_rec->pnp_event) );\r
- CL_ASSERT( p_event_rec->pnp_event == IB_PNP_CA_ADD ||\r
- p_event_rec->pnp_event == IB_PNP_PORT_ADD ||\r
- p_event_rec->pnp_event == IB_PNP_PORT_INIT ||\r
- p_event_rec->pnp_event == IB_PNP_PORT_ACTIVE ||\r
- p_event_rec->pnp_event == IB_PNP_PORT_DOWN ||\r
- p_event_rec->pnp_event == IB_PNP_PKEY_CHANGE ||\r
- p_event_rec->pnp_event == IB_PNP_SM_CHANGE ||\r
- p_event_rec->pnp_event == IB_PNP_GID_CHANGE ||\r
- p_event_rec->pnp_event == IB_PNP_LID_CHANGE ||\r
- p_event_rec->pnp_event == IB_PNP_SUBNET_TIMEOUT_CHANGE ||\r
- p_event_rec->pnp_event == IB_PNP_CA_REMOVE ||\r
- p_event_rec->pnp_event == IB_PNP_PORT_REMOVE );\r
- return IB_SUCCESS;\r
- }\r
-\r
- /* Invoke the user callback. */\r
- status = p_reg->pfn_pnp_cb( &u.user_rec );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- /* Store the user's event context in the context block. */\r
- p_context->context = u.user_rec.context;\r
- }\r
- else\r
- {\r
- cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item );\r
- cl_free( p_context );\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Context creation.\r
- */\r
-al_pnp_context_t*\r
-pnp_create_context(\r
- IN al_pnp_t* const p_reg,\r
- IN const void* const p_key )\r
-{\r
- al_pnp_context_t *p_context;\r
- cl_fmap_item_t *p_item;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( p_reg );\r
-\r
- /* No context exists for this port. Create one. */\r
- p_context = (al_pnp_context_t*)cl_pzalloc( sizeof(al_pnp_context_t) );\r
- if( !p_context )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to cl_zalloc al_pnp_context_t (%I64d bytes).\n",\r
- sizeof(al_pnp_context_t)) );\r
- return NULL;\r
- }\r
- /* Store the GUID in the context record. */\r
- cl_memcpy(&p_context->guid, p_key, sizeof(ib_net64_t) * 2);\r
-\r
- /* Add the context to the context list. */\r
- p_item = cl_fmap_insert( &p_reg->context_map, &p_context->guid,\r
- &p_context->map_item );\r
- if( p_item != &p_context->map_item )\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
- ("p_context is already in context map %I64x \n",p_context->guid));\r
- p_context = NULL;\r
- }\r
- \r
- \r
- AL_EXIT( AL_DBG_PNP );\r
- return p_context;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Report all local port information. This notifies the user of PORT_ADD\r
- * events, along with port state events (PORT_INIT, PORT_ACTIVE).\r
- */\r
-static void\r
-__pnp_port_notify(\r
- IN al_pnp_t *p_reg,\r
- IN al_ci_ca_t *p_ci_ca )\r
-{\r
- ib_api_status_t status;\r
- al_pnp_context_t *p_context;\r
- ib_port_attr_t *p_port_attr;\r
- al_pnp_ca_event_t event_rec;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- event_rec.p_ci_ca = p_ci_ca;\r
-\r
- for( event_rec.port_index = 0;\r
- event_rec.port_index < p_ci_ca->num_ports;\r
- event_rec.port_index++ )\r
- {\r
- p_port_attr = p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_port_attr += event_rec.port_index;\r
-\r
- /* Create a new context for user port information. */\r
- p_context = pnp_create_context( p_reg, &p_port_attr->port_guid);\r
- if( !p_context )\r
- continue;\r
-\r
- /* Notify the user of the port's existence. */\r
- event_rec.pnp_event = IB_PNP_PORT_ADD;\r
- status = __pnp_notify_user( p_reg, p_context, &event_rec );\r
- if( status != IB_SUCCESS )\r
- continue;\r
-\r
- /* Generate a port down event if the port is currently down. */\r
- if( p_port_attr->link_state == IB_LINK_DOWN )\r
- {\r
- event_rec.pnp_event = IB_PNP_PORT_DOWN;\r
- __pnp_notify_user( p_reg, p_context, &event_rec );\r
- }\r
- else\r
- {\r
- /* Generate port init event. */\r
- if( p_port_attr->link_state >= IB_LINK_INIT )\r
- {\r
- event_rec.pnp_event = IB_PNP_PORT_INIT;\r
- status = __pnp_notify_user( p_reg, p_context, &event_rec );\r
- if( status != IB_SUCCESS )\r
- continue;\r
- }\r
- /* Generate port armed event. */\r
- if( p_port_attr->link_state >= IB_LINK_ARMED )\r
- {\r
- event_rec.pnp_event = IB_PNP_PORT_ARMED;\r
- status = __pnp_notify_user( p_reg, p_context, &event_rec );\r
- if( status != IB_SUCCESS )\r
- continue;\r
- }\r
- /* Generate port up event. */\r
- if( p_port_attr->link_state >= IB_LINK_ACTIVE )\r
- {\r
- event_rec.pnp_event = IB_PNP_PORT_ACTIVE;\r
- __pnp_notify_user( p_reg, p_context, &event_rec );\r
- }\r
- }\r
- }\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Registration and deregistration.\r
- */\r
-static void\r
-__pnp_reg_notify(\r
- IN al_pnp_t* const p_reg )\r
-{\r
- al_pnp_ca_event_t event_rec;\r
- size_t i;\r
- al_pnp_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ )\r
- {\r
- event_rec.p_ci_ca = (al_ci_ca_t*)\r
- cl_ptr_vector_get( &gp_pnp->ca_vector, i );\r
- if( !event_rec.p_ci_ca )\r
- continue;\r
-\r
- switch( pnp_get_class( p_reg->pnp_class ) )\r
- {\r
- case IB_PNP_CA:\r
- event_rec.pnp_event = IB_PNP_CA_ADD;\r
- p_context = pnp_create_context( p_reg,\r
- &event_rec.p_ci_ca->p_pnp_attr->ca_guid);\r
- if( !p_context )\r
- break;\r
-\r
- __pnp_notify_user( p_reg, p_context, &event_rec );\r
- break;\r
-\r
- case IB_PNP_PORT:\r
- __pnp_port_notify( p_reg, event_rec.p_ci_ca );\r
- break;\r
-\r
- default:\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ||\r
- pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
- continue;\r
- }\r
- }\r
-\r
- /* Notify the user that the registration is complete. */\r
- pnp_reg_complete( p_reg );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__al_pnp_process_reg(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- al_pnp_t* p_reg;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item );\r
-\r
- /* Add the registrant to the list. */\r
- switch( pnp_get_class( p_reg->pnp_class ) )\r
- {\r
- case IB_PNP_CA:\r
- cl_qlist_insert_tail( &gp_pnp->ca_reg_list, &p_reg->list_item );\r
- break;\r
-\r
- case IB_PNP_PORT:\r
- cl_qlist_insert_tail( &gp_pnp->port_reg_list, &p_reg->list_item );\r
- break;\r
-\r
- default:\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ||\r
- pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
- }\r
-\r
- /* Generate all relevant events for the registration. */\r
- __pnp_reg_notify( p_reg );\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_reg->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-ib_reg_pnp(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_pnp_req_t* const p_pnp_req,\r
- OUT ib_pnp_handle_t* const ph_pnp )\r
-{\r
- ib_api_status_t status;\r
- KEVENT event;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
- return IB_INVALID_AL_HANDLE;\r
- }\r
- if( !p_pnp_req || !ph_pnp )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
- KeInitializeEvent( &event, SynchronizationEvent, FALSE );\r
-\r
- status = al_reg_pnp( h_al, p_pnp_req, &event, ph_pnp );\r
- /* Release the reference taken in init_al_obj. */\r
- if( status == IB_SUCCESS )\r
- {\r
- deref_al_obj( &(*ph_pnp)->obj );\r
- \r
- /* Wait for registration to complete if synchronous. */\r
- if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
- {\r
- KeWaitForSingleObject(\r
- &event, Executive, KernelMode, TRUE, NULL );\r
- }\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return status;\r
-}\r
-\r
-\r
-ib_api_status_t\r
-al_reg_pnp(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_pnp_req_t* const p_pnp_req,\r
- IN KEVENT *p_sync_event,\r
- OUT ib_pnp_handle_t* const ph_pnp )\r
-{\r
- ib_api_status_t status;\r
- al_pnp_t* p_reg;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Allocate a new registration info structure. */\r
- p_reg = (al_pnp_t*)cl_zalloc( sizeof(al_pnp_t) );\r
- if( !p_reg )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to cl_zalloc al_pnp_t (%I64d bytes).\n",\r
- sizeof(al_pnp_t)) );\r
- return( IB_INSUFFICIENT_MEMORY );\r
- }\r
-\r
- /* Initialize the registration info. */\r
- construct_al_obj( &p_reg->obj, AL_OBJ_TYPE_H_PNP );\r
- switch(pnp_get_class(p_pnp_req->pnp_class)){\r
- case IB_PNP_IOU:\r
- case IB_PNP_IOC:\r
- cl_fmap_init( &p_reg->context_map, __context_cmp128 );\r
- break;\r
- case IB_PNP_PORT:\r
- case IB_PNP_CA:\r
- cl_fmap_init( &p_reg->context_map, __context_cmp64 );\r
- break;\r
- default:\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("unknown pnp_class 0x%x.\n", pnp_get_class(p_pnp_req->pnp_class)));\r
- }\r
- status = init_al_obj( &p_reg->obj, p_pnp_req->pnp_context, TRUE,\r
- __pnp_reg_destroying, __pnp_reg_cleanup, __pnp_reg_free );\r
- if( status != IB_SUCCESS )\r
- {\r
- __pnp_reg_free( &p_reg->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) );\r
- return( status );\r
- }\r
- status = attach_al_obj( &h_al->obj, &p_reg->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_reg->obj.pfn_destroy( &p_reg->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Reference the PnP Manager. */\r
- ref_al_obj( &gp_pnp->obj );\r
-\r
- /* Copy the request information. */\r
- p_reg->pnp_class = p_pnp_req->pnp_class;\r
- p_reg->pfn_pnp_cb = p_pnp_req->pfn_pnp_cb;\r
-\r
- p_reg->p_sync_event = p_sync_event;\r
-\r
- /* Send IOU/IOC registration to the IOC PnP manager. */\r
- if( pnp_get_class(p_pnp_req->pnp_class) == IB_PNP_IOU ||\r
- pnp_get_class(p_pnp_req->pnp_class) == IB_PNP_IOC )\r
- {\r
- p_reg->async_item.pfn_callback = ioc_pnp_process_reg;\r
- p_reg->dereg_item.pfn_callback = ioc_pnp_process_dereg;\r
- }\r
- else\r
- {\r
- p_reg->async_item.pfn_callback = __al_pnp_process_reg;\r
- p_reg->dereg_item.pfn_callback = __al_pnp_process_dereg;\r
- }\r
-\r
- /* Queue the registrant for addition to the list. */\r
- ref_al_obj( &p_reg->obj );\r
- cl_async_proc_queue( gp_async_pnp_mgr, &p_reg->async_item );\r
-\r
- /* Set the user handle. */\r
- *ph_pnp = p_reg;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return( IB_SUCCESS );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-ib_dereg_pnp(\r
- IN const ib_pnp_handle_t h_pnp,\r
- IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )\r
-{\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_pnp, AL_OBJ_TYPE_H_PNP ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- ref_al_obj( &h_pnp->obj );\r
- h_pnp->obj.pfn_destroy( &h_pnp->obj, pfn_destroy_cb );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return( IB_SUCCESS );\r
-}\r
-\r
-\r
-/*\r
- * CA event handling.\r
- */\r
-static void\r
-__pnp_process_add_ca(\r
- IN cl_async_proc_item_t *p_item )\r
-{\r
- al_pnp_t *p_reg;\r
- al_pnp_ca_event_t *p_event_rec;\r
- cl_list_item_t *p_reg_item;\r
- al_pnp_context_t *p_context;\r
- cl_status_t cl_status;\r
- size_t i;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_event_rec = PARENT_STRUCT( p_item, al_pnp_ca_event_t, async_item );\r
-\r
- cl_spinlock_acquire( &gp_pnp->obj.lock );\r
- /* Add the CA to the CA vector. */\r
- for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ )\r
- {\r
- if( !cl_ptr_vector_get( &gp_pnp->ca_vector, i ) )\r
- {\r
- cl_status = cl_ptr_vector_set( &gp_pnp->ca_vector, i,\r
- p_event_rec->p_ci_ca );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- break;\r
- }\r
- }\r
- cl_spinlock_release( &gp_pnp->obj.lock );\r
- CL_ASSERT( i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ) );\r
-\r
- /* Walk the list of registrants for notification. */\r
- for( p_reg_item = cl_qlist_head( &gp_pnp->ca_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->ca_reg_list );\r
- p_reg_item = cl_qlist_next( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA );\r
-\r
- /* Allocate the user's context. */\r
- /*\r
- * Moving this allocation to the pnp_ca_event call is left as an\r
- * exercise to the open source community.\r
- */\r
- p_context = pnp_create_context( p_reg,\r
- &p_event_rec->p_ci_ca->p_pnp_attr->ca_guid);\r
- if( !p_context )\r
- continue;\r
-\r
- /* Notify the user. */\r
- __pnp_notify_user( p_reg, p_context, p_event_rec );\r
- }\r
-\r
- /* Generate port add and state events. */\r
- for( p_reg_item = cl_qlist_head( &gp_pnp->port_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list );\r
- p_reg_item = cl_qlist_next( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
- __pnp_port_notify( p_reg, p_event_rec->p_ci_ca );\r
- }\r
-\r
- /* Cleanup the event record. */\r
- deref_al_obj( &gp_pnp->obj );\r
- cl_free( p_event_rec );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__pnp_process_remove_port(\r
- IN const al_pnp_ca_event_t* const p_event_rec )\r
-{\r
- ib_api_status_t status;\r
- al_pnp_t *p_reg;\r
- cl_list_item_t *p_reg_item;\r
- uint8_t port_index;\r
- al_pnp_context_t *p_context;\r
- al_pnp_ca_event_t event_rec;\r
- ib_port_attr_t *p_port_attr;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( p_event_rec->p_ci_ca->p_pnp_attr );\r
- CL_ASSERT( p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr );\r
-\r
- /* Notify the IOC PnP manager of the port down event. */\r
- //***TODO: Make some call to the IOC PnP manager here, such as\r
- //***TODO: al_ioc_pnp_process_port_down( p_event_rec->p_ci_ca,\r
- //***TODO: p_event_rec->port_index );\r
-\r
- cl_memclr( &event_rec, sizeof( al_pnp_ca_event_t ) );\r
- event_rec = *p_event_rec;\r
-\r
- /* Walk the list of registrants for notification. */\r
- for( p_reg_item = cl_qlist_tail( &gp_pnp->port_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list );\r
- p_reg_item = cl_qlist_prev( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
-\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
-\r
- for( port_index = 0;\r
- port_index < p_event_rec->p_ci_ca->num_ports;\r
- port_index++ )\r
- {\r
- p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_port_attr += port_index;\r
- p_context = pnp_get_context( p_reg, &p_port_attr->port_guid );\r
- if( !p_context )\r
- continue;\r
-\r
- event_rec.port_index = port_index;\r
-\r
- if( p_port_attr->link_state >= IB_LINK_INIT )\r
- {\r
- /* Notify the user of the port down. */\r
- event_rec.pnp_event = IB_PNP_PORT_DOWN;\r
- status = __pnp_notify_user( p_reg, p_context, &event_rec );\r
- if( status != IB_SUCCESS )\r
- continue;\r
- }\r
-\r
- /* Notify the user of the port remove. */\r
- event_rec.pnp_event = IB_PNP_PORT_REMOVE;\r
- status = __pnp_notify_user( p_reg, p_context, &event_rec );\r
- if( status == IB_SUCCESS )\r
- {\r
- /*\r
- * Remove the port context from the registrant's\r
- * context list.\r
- */\r
- cl_fmap_remove_item( &p_reg->context_map,\r
- &p_context->map_item );\r
- /* Free the context. */\r
- cl_free( p_context );\r
- }\r
- }\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-static void\r
-__pnp_process_remove_ca(\r
- IN cl_async_proc_item_t *p_item )\r
-{\r
- al_pnp_t *p_reg;\r
- al_pnp_ca_event_t *p_event_rec;\r
- cl_list_item_t *p_reg_item;\r
- al_pnp_context_t *p_context = NULL;\r
- size_t i;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_event_rec = PARENT_STRUCT( p_item, al_pnp_ca_event_t, async_item );\r
-\r
- /* Generate port remove events. */\r
- __pnp_process_remove_port( p_event_rec );\r
-\r
- /* Walk the list of registrants for notification. */\r
- for( p_reg_item = cl_qlist_tail( &gp_pnp->ca_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->ca_reg_list );\r
- p_reg_item = cl_qlist_prev( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
-\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA );\r
-\r
- /* Search the context list for this CA. */\r
- p_context =\r
- pnp_get_context( p_reg, &p_event_rec->p_ci_ca->p_pnp_attr->ca_guid);\r
-\r
- /* Make sure we found a context. */\r
- if( !p_context )\r
- continue;\r
-\r
- /* Notify the user. */\r
- if( __pnp_notify_user( p_reg, p_context, p_event_rec ) == IB_SUCCESS )\r
- {\r
- /* Remove the context from the context list. */\r
- cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item );\r
-\r
- /* Deallocate the context block. */\r
- cl_free( p_context );\r
- }\r
- }\r
-\r
- /* Remove the CA from the CA vector. */\r
- for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ )\r
- {\r
- if( cl_ptr_vector_get( &gp_pnp->ca_vector, i ) ==\r
- p_event_rec->p_ci_ca )\r
- {\r
- cl_ptr_vector_remove( &gp_pnp->ca_vector, i );\r
- break;\r
- }\r
- }\r
-\r
- /* Release the reference to the CA. */\r
- deref_al_obj( &p_event_rec->p_ci_ca->obj );\r
-\r
- /* Cleanup the event record. */\r
- deref_al_obj( &gp_pnp->obj );\r
- cl_free( p_event_rec );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-pnp_ca_event(\r
- IN al_ci_ca_t* const p_ci_ca,\r
- IN const ib_pnp_event_t event )\r
-{\r
- ib_api_status_t status;\r
- cl_status_t cl_status;\r
- al_pnp_ca_event_t *p_event_rec;\r
- ib_ca_attr_t *p_old_ca_attr;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Allocate an event record. */\r
- p_event_rec = (al_pnp_ca_event_t*)cl_zalloc( sizeof(al_pnp_ca_event_t) );\r
- if( !p_event_rec )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to cl_zalloc al_pnp_ca_event_t (%I64d bytes).\n",\r
- sizeof(al_pnp_ca_event_t)) );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Store the event type. */\r
- p_event_rec->pnp_event = event;\r
- /* Store a pointer to the ca. */\r
- p_event_rec->p_ci_ca = p_ci_ca;\r
-\r
- switch( event )\r
- {\r
- case IB_PNP_CA_ADD:\r
- p_event_rec->async_item.pfn_callback = __pnp_process_add_ca;\r
-\r
- /* Reserve space for the CA in the CA vector. */\r
- cl_spinlock_acquire( &gp_pnp->obj.lock );\r
- cl_status = cl_ptr_vector_set_size( &gp_pnp->ca_vector,\r
- cl_ptr_vector_get_size( &gp_pnp->ca_vector ) + 1 );\r
- cl_spinlock_release( &gp_pnp->obj.lock );\r
-\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- cl_free( p_event_rec );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_ptr_vector_set_size failed with status %#x.\n",\r
- cl_status) );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
- /* Read the CA attributes required to process the event. */\r
- status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_spinlock_acquire( &gp_pnp->obj.lock );\r
- cl_status = cl_ptr_vector_set_size( &gp_pnp->ca_vector,\r
- cl_ptr_vector_get_size( &gp_pnp->ca_vector ) - 1 );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- cl_spinlock_release( &gp_pnp->obj.lock );\r
- cl_free( p_event_rec );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ci_ca_update_attr failed.\n") );\r
- return status;\r
- }\r
-\r
- /* Take out a reference to the CA until it is removed. */\r
- ref_al_obj( &p_ci_ca->obj );\r
- break;\r
-\r
- case IB_PNP_CA_REMOVE:\r
- if( !p_event_rec->p_ci_ca->p_pnp_attr )\r
- {\r
- /* The CA was never added by the PNP manager. */\r
- cl_free( p_event_rec );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Ignoring removal request for unknown CA.\n") );\r
- return IB_NOT_FOUND;\r
- }\r
-\r
- p_event_rec->async_item.pfn_callback = __pnp_process_remove_ca;\r
- break;\r
-\r
- default:\r
- /* Invalid event for this function. */\r
- CL_ASSERT( event == IB_PNP_CA_ADD || event == IB_PNP_CA_REMOVE );\r
- cl_free( p_event_rec );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid event type.\n") );\r
- return IB_ERROR;\r
- }\r
-\r
- /* Queue the event to the async processing manager. */\r
- ref_al_obj( &gp_pnp->obj );\r
- cl_async_proc_queue( gp_async_pnp_mgr, &p_event_rec->async_item );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Port event handling.\r
- */\r
-\r
-/*\r
- * Processes a port event, reporting it to clients from the first\r
- * registrant to the last.\r
- */\r
-static void\r
-__pnp_process_port_forward(\r
- IN al_pnp_ca_event_t* p_event_rec )\r
-{\r
- al_pnp_t *p_reg;\r
- cl_list_item_t *p_reg_item;\r
- al_pnp_context_t *p_context;\r
- ib_port_attr_t *p_port_attr;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Walk the list of registrants for notification. */\r
- for( p_reg_item = cl_qlist_head( &gp_pnp->port_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list );\r
- p_reg_item = cl_qlist_next( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
-\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
-\r
- p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_port_attr += p_event_rec->port_index;\r
-\r
- p_context = pnp_get_context( p_reg, &p_port_attr->port_guid );\r
- if( !p_context )\r
- continue;\r
-\r
- /* Notify the user. */\r
- __pnp_notify_user( p_reg, p_context, p_event_rec );\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-/*\r
- * Processes a port event, reporting it to clients from the last\r
- * registrant to the first.\r
- */\r
-static void\r
-__pnp_process_port_backward(\r
- IN al_pnp_ca_event_t* p_event_rec )\r
-{\r
- al_pnp_t *p_reg;\r
- cl_list_item_t *p_reg_item;\r
- al_pnp_context_t *p_context;\r
- ib_port_attr_t *p_port_attr;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Walk the list of registrants for notification. */\r
- for( p_reg_item = cl_qlist_tail( &gp_pnp->port_reg_list );\r
- p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list );\r
- p_reg_item = cl_qlist_prev( p_reg_item ) )\r
- {\r
- p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item );\r
-\r
- CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );\r
-\r
- p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_port_attr += p_event_rec->port_index;\r
-\r
- p_context = pnp_get_context( p_reg, &p_port_attr->port_guid );\r
- if( !p_context )\r
- continue;\r
-\r
- /* Notify the user. */\r
- __pnp_notify_user( p_reg, p_context, p_event_rec );\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Check for port attribute changes.\r
- */\r
-static void\r
-__pnp_check_ports(\r
- IN al_ci_ca_t* const p_ci_ca,\r
- IN const ib_ca_attr_t* const p_old_ca_attr )\r
-{\r
- uint16_t index;\r
- al_pnp_ca_event_t event_rec;\r
- ib_port_attr_t *p_old_port_attr, *p_new_port_attr;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- /* Store the event information. */\r
- event_rec.p_ci_ca = p_ci_ca;\r
-\r
- for( event_rec.port_index = 0;\r
- event_rec.port_index < p_ci_ca->p_pnp_attr->num_ports;\r
- event_rec.port_index++ )\r
- {\r
- p_old_port_attr = p_old_ca_attr->p_port_attr;\r
- p_old_port_attr += event_rec.port_index;\r
- p_new_port_attr = p_ci_ca->p_pnp_attr->p_port_attr;\r
- p_new_port_attr += event_rec.port_index;\r
-\r
- /* Check the link state. */\r
- if( p_old_port_attr->link_state != p_new_port_attr->link_state )\r
- {\r
- switch( p_new_port_attr->link_state )\r
- {\r
- case IB_LINK_DOWN:\r
- event_rec.pnp_event = IB_PNP_PORT_DOWN;\r
- __pnp_process_port_backward( &event_rec );\r
- break;\r
-\r
- case IB_LINK_INIT:\r
- if( p_old_port_attr->link_state > IB_LINK_INIT )\r
- {\r
- /* Missed the down event. */\r
- event_rec.pnp_event = IB_PNP_PORT_DOWN;\r
- __pnp_process_port_backward( &event_rec );\r
- }\r
- event_rec.pnp_event = IB_PNP_PORT_INIT;\r
- __pnp_process_port_forward( &event_rec );\r
- break;\r
-\r
- case IB_LINK_ARMED:\r
- if( p_old_port_attr->link_state > IB_LINK_ARMED )\r
- {\r
- /* Missed the down and init events. */\r
- event_rec.pnp_event = IB_PNP_PORT_DOWN;\r
- __pnp_process_port_backward( &event_rec );\r
- event_rec.pnp_event = IB_PNP_PORT_INIT;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- event_rec.pnp_event = IB_PNP_PORT_ARMED;\r
- __pnp_process_port_forward( &event_rec );\r
- break;\r
-\r
- case IB_LINK_ACTIVE:\r
- case IB_LINK_ACT_DEFER:\r
- if( p_old_port_attr->link_state == IB_LINK_DOWN )\r
- {\r
- /* Missed the init and armed event. */\r
- event_rec.pnp_event = IB_PNP_PORT_INIT;\r
- __pnp_process_port_forward( &event_rec );\r
- event_rec.pnp_event = IB_PNP_PORT_ARMED;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- if( p_old_port_attr->link_state < IB_LINK_ACTIVE )\r
- {\r
- event_rec.pnp_event = IB_PNP_PORT_ACTIVE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- break;\r
-\r
- default:\r
- break;\r
- }\r
- }\r
-\r
- /*\r
- * Check for P_Key and GID table changes.\r
- * The tables are only valid in the armed or active states.\r
- */\r
- if( ( (p_old_port_attr->link_state == IB_LINK_ARMED) ||\r
- (p_old_port_attr->link_state == IB_LINK_ACTIVE) )\r
- &&\r
- ( (p_new_port_attr->link_state == IB_LINK_ARMED) ||\r
- (p_new_port_attr->link_state == IB_LINK_ACTIVE) ) )\r
- {\r
- /* A different number of P_Keys indicates a change.*/\r
- if( p_old_port_attr->num_pkeys != p_new_port_attr->num_pkeys )\r
- {\r
- event_rec.pnp_event = IB_PNP_PKEY_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- else\r
- {\r
- /* Same number of P_Keys - compare the table contents. */\r
- for( index = 0; index < p_old_port_attr->num_pkeys; index++ )\r
- {\r
- if( p_old_port_attr->p_pkey_table[index] !=\r
- p_new_port_attr->p_pkey_table[index] )\r
- {\r
- event_rec.pnp_event = IB_PNP_PKEY_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- break;\r
- }\r
- }\r
- }\r
-\r
- /* A different number of GIDs indicates a change.*/\r
- if( p_old_port_attr->num_gids != p_new_port_attr->num_gids )\r
- {\r
- event_rec.pnp_event = IB_PNP_GID_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- else\r
- {\r
- /* Same number of GIDs - compare the table contents. */\r
- for( index = 0; index < p_old_port_attr->num_gids; index++ )\r
- {\r
- if( cl_memcmp( p_old_port_attr->p_gid_table[index].raw,\r
- p_new_port_attr->p_gid_table[index].raw,\r
- sizeof( ib_gid_t ) ) )\r
- {\r
- event_rec.pnp_event = IB_PNP_GID_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- break;\r
- }\r
- }\r
- }\r
- }\r
-\r
- /* Check for LID change. */\r
- if( (p_old_port_attr->lid != p_new_port_attr->lid) ||\r
- (p_old_port_attr->lmc != p_new_port_attr->lmc) )\r
- {\r
- event_rec.pnp_event = IB_PNP_LID_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- /* Check for SM related changes. */\r
- if( (p_old_port_attr->sm_lid != p_new_port_attr->sm_lid) ||\r
- (p_old_port_attr->sm_sl != p_new_port_attr->sm_sl) )\r
- {\r
- event_rec.pnp_event = IB_PNP_SM_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- /* Check for subnet timeout change. */\r
- if( p_old_port_attr->subnet_timeout !=\r
- p_new_port_attr->subnet_timeout )\r
- {\r
- event_rec.pnp_event = IB_PNP_SUBNET_TIMEOUT_CHANGE;\r
- __pnp_process_port_forward( &event_rec );\r
- }\r
- }\r
-}\r
-\r
-\r
-\r
-static boolean_t\r
-__pnp_cmp_attr(\r
- IN ib_ca_attr_t *p_attr_1,\r
- IN ib_ca_attr_t *p_attr_2\r
- )\r
-{\r
- uint8_t port_index;\r
- ib_port_attr_t* p_port_attr_1;\r
- ib_port_attr_t* p_port_attr_2;\r
-\r
- CL_ASSERT( p_attr_1 && p_attr_2 );\r
-\r
- for( port_index = 0;\r
- port_index < p_attr_1->num_ports;\r
- port_index++ )\r
- {\r
- /* Initialize pointers to the port attributes. */\r
- p_port_attr_1 = &p_attr_1->p_port_attr[port_index];\r
- p_port_attr_2 = &p_attr_2->p_port_attr[port_index];\r
-\r
- CL_ASSERT( p_port_attr_1->port_guid == p_port_attr_2->port_guid );\r
-\r
- if( cl_memcmp( p_port_attr_1, p_port_attr_2,\r
- offsetof( ib_port_attr_t, p_gid_table ) ) != 0 )\r
- {\r
- return FALSE;\r
- }\r
- }\r
-\r
- return TRUE;\r
-}\r
-\r
-\r
-\r
-static void\r
-__pnp_check_events(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- al_ci_ca_t *p_ci_ca;\r
- size_t i;\r
- uint32_t attr_size;\r
- ib_ca_attr_t *p_old_ca_attr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- UNUSED_PARAM( p_item );\r
- CL_ASSERT( gp_pnp );\r
-\r
- /* Walk all known CAs. */\r
- for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ )\r
- {\r
- p_ci_ca = (al_ci_ca_t*)cl_ptr_vector_get( &gp_pnp->ca_vector, i );\r
-\r
- /* Check if the CA was just added to our list but is not ready. */\r
- if( !p_ci_ca )\r
- continue;\r
-\r
- attr_size = p_ci_ca->p_pnp_attr->size;\r
- status = ib_query_ca( p_ci_ca->h_ca, p_ci_ca->p_user_attr, &attr_size );\r
-\r
- /* Report changes if there is an attribute size difference. */\r
- if( ( attr_size != p_ci_ca->p_pnp_attr->size ) ||\r
- !__pnp_cmp_attr( p_ci_ca->p_pnp_attr, p_ci_ca->p_user_attr ) )\r
- {\r
- status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );\r
- if( status == IB_SUCCESS )\r
- {\r
- /* Check port attributes and report changes. */\r
- __pnp_check_ports( p_ci_ca, p_old_ca_attr );\r
-\r
- /* Free the old CA attributes. */\r
- cl_free( p_old_ca_attr );\r
- }\r
- else\r
- {\r
- /*\r
- * Could not get new attribute buffers.\r
- * Skip this event - it should be picked up on the next check.\r
- */\r
- continue;\r
- }\r
- }\r
- }\r
-\r
- /* Dereference the PnP Manager. */\r
- deref_al_obj( &gp_pnp->obj );\r
- gp_pnp->async_item_is_busy = FALSE;\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Check and report PnP events.\r
- */\r
-void\r
-pnp_poll(\r
- void )\r
-{\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( gp_pnp );\r
-\r
- /* Determine if the PnP manager asynchronous processing item is busy. */\r
- cl_spinlock_acquire( &gp_pnp->obj.lock );\r
-\r
- if( gp_pnp->async_item_is_busy )\r
- {\r
- cl_spinlock_release( &gp_pnp->obj.lock );\r
- return;\r
- }\r
-\r
- gp_pnp->async_item_is_busy = TRUE;\r
-\r
- cl_spinlock_release( &gp_pnp->obj.lock );\r
-\r
- /* Reference the PnP Manager. */\r
- ref_al_obj( &gp_pnp->obj );\r
-\r
- /* Queue the request to check for PnP events. */\r
- cl_async_proc_queue( gp_async_pnp_mgr, &gp_pnp->async_item );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-\r
-static void\r
-__pnp_process_ca_change(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- al_pnp_ca_change_t *p_pnp_ca_change;\r
- ib_ca_attr_t *p_old_ca_attr;\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( p_item );\r
- CL_ASSERT( gp_pnp );\r
-\r
- p_pnp_ca_change = PARENT_STRUCT( p_item, al_pnp_ca_change_t, async_item );\r
-\r
- p_ci_ca = p_pnp_ca_change->p_ci_ca;\r
-\r
- /*\r
- * Prevent readers of the CA attributes from accessing them while\r
- * we are updating the pointers.\r
- */\r
- ci_ca_excl_lock_attr( p_ci_ca );\r
-\r
- /* Swap the old and new CA attributes. */\r
- p_old_ca_attr = p_ci_ca->p_pnp_attr;\r
- p_ci_ca->p_pnp_attr = p_pnp_ca_change->p_new_ca_attr;\r
- p_ci_ca->p_user_attr = (ib_ca_attr_t*)(((uint8_t*)p_ci_ca->p_pnp_attr) +\r
- p_ci_ca->p_pnp_attr->size);\r
- ci_ca_unlock_attr( p_ci_ca );\r
-\r
- /* Report changes. */\r
- __pnp_check_ports( p_ci_ca, p_old_ca_attr );\r
-\r
- /* Free the old CA attributes. */\r
- cl_free( p_old_ca_attr );\r
-\r
- /* Dereference the PnP Manager. */\r
- deref_al_obj( &gp_pnp->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Called by user mode AL to report a CA attribute change.\r
- */\r
-ib_api_status_t\r
-pnp_ca_change(\r
- IN al_ci_ca_t* const p_ci_ca,\r
- IN const ib_ca_attr_t* p_ca_attr )\r
-{\r
- ib_ca_attr_t* p_new_ca_attr;\r
- al_pnp_ca_change_t* p_pnp_ca_change;\r
- size_t size;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- CL_ASSERT( p_ci_ca );\r
- CL_ASSERT( p_ca_attr );\r
-\r
- /*\r
- * Allocate the new CA attributes buffer.\r
- * Double the buffer size for PnP and user reporting halves.\r
- * Also include the CA change event structure in the allocation.\r
- */\r
- size = ( p_ca_attr->size * 2 ) + sizeof( al_pnp_ca_change_t );\r
- p_new_ca_attr = (ib_ca_attr_t*)cl_zalloc( size );\r
- if( !p_new_ca_attr )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR,AL_DBG_PNP,\r
- ("Unable to allocate buffer for changed CA attributes\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Copy the attributes. */\r
- ib_copy_ca_attr( p_new_ca_attr, p_ca_attr );\r
-\r
- /* Initialize a pointer to the CA change event structure. */\r
- p_pnp_ca_change = (al_pnp_ca_change_t*)\r
- (((uint8_t*)p_new_ca_attr) + ( p_ca_attr->size * 2 ));\r
-\r
- /* Initialize the CA change event strucuture. */\r
- p_pnp_ca_change->async_item.pfn_callback = __pnp_process_ca_change;\r
- p_pnp_ca_change->p_ci_ca = p_ci_ca;\r
- p_pnp_ca_change->p_new_ca_attr = p_new_ca_attr;\r
-\r
- /* Reference the PnP Manager. */\r
- ref_al_obj( &gp_pnp->obj );\r
-\r
- /* Queue the CA change event. */\r
- cl_async_proc_queue( gp_async_pnp_mgr, &p_pnp_ca_change->async_item );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-ib_api_status_t\r
-ib_reject_ioc(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_pnp_handle_t h_event )\r
-{\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
- return IB_INVALID_AL_HANDLE;\r
- }\r
- if( !h_event )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return IB_INVALID_HANDLE;\r
- }\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return IB_UNSUPPORTED;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-\r
-#include <complib/comp_lib.h>\r
-#include <iba/ib_al.h>\r
-#include <iba/ib_al_ioctl.h>\r
-#include "al.h"\r
-#include "al_mr.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_proxy.tmh"\r
-#endif\r
-#include "al_dev.h"\r
-#include "al_ci_ca.h"\r
-#include "al_mgr.h"\r
-#include "al_pnp.h"\r
-#include "al_proxy.h"\r
-#include "ib_common.h"\r
-\r
-\r
-\r
-/*\r
- * Acquire an object used to queue callbacks.\r
- */\r
-al_proxy_cb_info_t*\r
-proxy_cb_get(\r
- IN al_dev_open_context_t *p_context )\r
-{\r
- al_proxy_cb_info_t *p_cb_info;\r
-\r
- if( !p_context )\r
- return NULL;\r
-\r
- cl_spinlock_acquire( &p_context->cb_pool_lock );\r
- p_cb_info = (al_proxy_cb_info_t*)cl_qpool_get( &p_context->cb_pool );\r
- cl_spinlock_release( &p_context->cb_pool_lock );\r
-\r
- if( p_cb_info )\r
- p_cb_info->p_context = p_context;\r
-\r
- return p_cb_info;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Release an object used to report callbacks.\r
- */\r
-void\r
-proxy_cb_put(\r
- IN al_proxy_cb_info_t *p_cb_info )\r
-{\r
- al_dev_open_context_t *p_context;\r
-\r
- if( !p_cb_info )\r
- return;\r
-\r
- p_context = p_cb_info->p_context;\r
-\r
- p_cb_info->reported = FALSE;\r
- p_cb_info->p_al_obj = NULL;\r
-\r
- cl_spinlock_acquire( &p_context->cb_pool_lock );\r
- cl_qpool_put( &p_context->cb_pool, &p_cb_info->pool_item );\r
- cl_spinlock_release( &p_context->cb_pool_lock );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REG_SHMID:\r
- */\r
-static\r
-cl_status_t\r
-proxy_reg_shmid(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_reg_shmid_ioctl_t *p_ioctl =\r
- (ual_reg_shmid_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_mr_handle_t h_mr;\r
- uint64_t vaddr;\r
- net32_t lkey, rkey;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(ual_reg_shmid_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(ual_reg_shmid_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );\r
- p_ioctl->out.status = IB_INVALID_PD_HANDLE;\r
- goto done;\r
- }\r
-\r
- /* Validate input region size. */\r
- if( p_ioctl->in.mr_create.length > ~((size_t)0) )\r
- {\r
- cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );\r
- p_ioctl->out.status = IB_INVALID_SETTING;\r
- goto done;\r
- }\r
-\r
- p_ioctl->out.status = reg_shmid(\r
- h_pd,\r
- p_ioctl->in.shmid,\r
- &p_ioctl->in.mr_create,\r
- &vaddr,\r
- &lkey,\r
- &rkey,\r
- &h_mr );\r
-\r
- if( p_ioctl->out.status == IB_SUCCESS )\r
- {\r
- /* We put the kernel al handle itself in the al_list for the process */\r
- p_ioctl->out.vaddr = vaddr;\r
- p_ioctl->out.lkey = lkey;\r
- p_ioctl->out.rkey = rkey;\r
- p_ioctl->out.h_mr = h_mr->obj.hdl;\r
- h_mr->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_mr->obj );\r
- }\r
- else\r
- {\r
- /* release the memory handle allocated */\r
- p_ioctl->out.vaddr = 0;\r
- p_ioctl->out.lkey = 0;\r
- p_ioctl->out.rkey = 0;\r
- p_ioctl->out.h_mr = AL_INVALID_HANDLE;\r
- }\r
-\r
-done:\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Retrieve a callback record from the appropriate callback list\r
- * and fill the ioctl buffer.\r
- *\r
- * If no callback record is available, queue the ioctl buffer.\r
- * Queued ioctl buffer will put the calling process to sleep and complete\r
- * when complete when a callback record is available.\r
- */\r
-static cl_status_t\r
-proxy_queue_ioctl_buf(\r
- IN uintn_t cb_type,\r
- IN al_dev_open_context_t *p_context,\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- cl_qlist_t *p_cb_list;\r
- al_proxy_cb_info_t *p_cb_info;\r
- cl_ioctl_handle_t *ph_ioctl;\r
- uintn_t ioctl_size;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- /* Set up the appropriate callback list. */\r
- switch( cb_type )\r
- {\r
- case UAL_GET_CM_CB_INFO:\r
- p_cb_list = &p_context->cm_cb_list;\r
- ph_ioctl = &p_context->h_cm_ioctl;\r
- /* TODO: Use output size only. */\r
- ioctl_size = sizeof( cm_cb_ioctl_info_t );\r
- break;\r
-\r
- case UAL_GET_COMP_CB_INFO:\r
- p_cb_list = &p_context->comp_cb_list;\r
- ph_ioctl = &p_context->h_comp_ioctl;\r
- /* TODO: Use output size only. */\r
- ioctl_size = sizeof( comp_cb_ioctl_info_t );\r
- break;\r
-\r
- case UAL_GET_MISC_CB_INFO:\r
- p_cb_list = &p_context->misc_cb_list;\r
- ph_ioctl = &p_context->h_misc_ioctl;\r
- /* TODO: Use output size only. */\r
- ioctl_size = sizeof( misc_cb_ioctl_info_t );\r
- break;\r
-\r
- default:\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Process queued callbacks. */\r
- cl_spinlock_acquire( &p_context->cb_lock );\r
- while( !cl_is_qlist_empty( p_cb_list ) )\r
- {\r
- p_cb_info = (al_proxy_cb_info_t*)cl_qlist_head( p_cb_list );\r
-\r
- /* Check to see if we've already reported the callback. */\r
- if( !p_cb_info->reported )\r
- {\r
- p_cb_info->reported = TRUE;\r
-\r
- /* Return the callback to the user. */\r
- CL_ASSERT( cl_ioctl_out_size( h_ioctl ) >= ioctl_size );\r
- cl_memcpy(\r
- cl_ioctl_out_buf( h_ioctl ), &p_cb_info->cb_type, ioctl_size );\r
- cl_ioctl_complete( h_ioctl, CL_SUCCESS, ioctl_size );\r
- cl_spinlock_release( &p_context->cb_lock );\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_COMPLETED;\r
- }\r
- if( p_cb_info->p_al_obj )\r
- deref_al_obj( p_cb_info->p_al_obj );\r
-\r
- cl_qlist_remove_head( p_cb_list );\r
- proxy_cb_put( p_cb_info );\r
- }\r
-\r
- /* There are no callbacks to report. Mark this IOCTL as pending. */\r
- CL_ASSERT( !(*ph_ioctl) );\r
-\r
- /* If we're closing down, complete the IOCTL with a canceled status. */\r
- if( p_context->closing )\r
- {\r
- cl_spinlock_release( &p_context->cb_lock );\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_CANCELED;\r
- }\r
-\r
- *ph_ioctl = h_ioctl;\r
- /* Set the cancel routine for this IRP so the app can abort. */\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, al_dev_cancel_io );\r
-#pragma warning(pop)\r
- /* If returning pending, the IRP must be marked as such. */\r
- IoMarkIrpPending( h_ioctl );\r
-\r
- /* Ref the context until the IOCTL is either completed or cancelled. */\r
- proxy_context_ref( p_context );\r
- cl_spinlock_release( &p_context->cb_lock );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_PENDING;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_GET_CM_CB_INFO:\r
- * Get a CM callback record from the queue of CM callback records\r
- */\r
-static cl_status_t\r
-proxy_get_cm_cb(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Check the size of the ioctl */\r
- if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(cm_cb_ioctl_info_t) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("No output buffer, or buffer too small.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_status = proxy_queue_ioctl_buf( UAL_GET_CM_CB_INFO,\r
- p_context, h_ioctl );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_GET_COMP_CB_INFO:\r
- * Get a completion callback record from the queue of CM callback records\r
- */\r
-static cl_status_t\r
-proxy_get_comp_cb(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_H_CQ )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Check the size of the ioctl */\r
- if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(comp_cb_ioctl_info_t) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("No output buffer, or buffer too small.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_status = proxy_queue_ioctl_buf( UAL_GET_COMP_CB_INFO,\r
- p_context, h_ioctl );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_GET_MISC_CB_INFO:\r
- * Get a miscellaneous callback record from the queue of CM callback records\r
- */\r
-static cl_status_t\r
-proxy_get_misc_cb(\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_AL_MGR )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Check the size of the ioctl */\r
- if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(misc_cb_ioctl_info_t) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("No output buffer, or buffer too small.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_status = proxy_queue_ioctl_buf( UAL_GET_MISC_CB_INFO,\r
- p_context, h_ioctl );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process a PnP callback for a CA.\r
- */\r
-ib_api_status_t\r
-proxy_pnp_ca_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- misc_cb_ioctl_info_t misc_cb_info;\r
- misc_cb_ioctl_rec_t *p_misc_rec = &misc_cb_info.ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_PROXY_CB );\r
-\r
- p_context = p_pnp_rec->pnp_context;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- proxy_context_deref( p_context );\r
- return IB_ERROR;\r
- }\r
-\r
- /* Initialize the PnP callback information to return to user-mode. */\r
- cl_memclr( &misc_cb_info, sizeof(misc_cb_info) );\r
- misc_cb_info.rec_type = PNP_REC;\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_event = p_pnp_rec->pnp_event;\r
-\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_CA_ADD:\r
- case IB_PNP_CA_REMOVE:\r
- /* Queue the add/remove pnp record */\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.ca_guid = p_pnp_rec->guid;\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,\r
- NULL );\r
- break;\r
-\r
- default:\r
- /* We only handle CA adds and removals. */\r
- break;\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_PROXY_CB );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process a PnP callback for a port.\r
- */\r
-ib_api_status_t\r
-proxy_pnp_port_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- ib_pnp_port_rec_t *p_port_rec;\r
- misc_cb_ioctl_info_t misc_cb_info;\r
- misc_cb_ioctl_rec_t *p_misc_rec = &misc_cb_info.ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
- ib_ca_attr_t *p_ca_attr;\r
- uint64_t hdl;\r
-\r
- AL_ENTER( AL_DBG_PROXY_CB );\r
-\r
- p_context = p_pnp_rec->pnp_context;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- proxy_context_deref( p_context );\r
- return IB_ERROR;\r
- }\r
-\r
- p_port_rec = (ib_pnp_port_rec_t*)p_pnp_rec;\r
-\r
- /* Initialize the PnP callback information to return to user-mode. */\r
- cl_memclr( &misc_cb_info, sizeof(misc_cb_info) );\r
- misc_cb_info.rec_type = PNP_REC;\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_event = p_pnp_rec->pnp_event;\r
-\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_PORT_ADD:\r
- case IB_PNP_PORT_REMOVE:\r
- /* Port add/remove will be generated automatically by uAL. */\r
- break;\r
-\r
- case IB_PNP_REG_COMPLETE:\r
- /*\r
- * Once our registration for ports is complete, report this to the\r
- * user-mode library. This indicates to the that the current\r
- * system state has been reported.\r
- */\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,\r
- NULL );\r
- break;\r
-\r
- default:\r
- /* Allocate space for the CA attributes. */\r
- p_ca_attr = cl_zalloc( p_port_rec->p_ca_attr->size );\r
- if( !p_ca_attr )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_malloc( %d ) failed.\n", p_port_rec->p_ca_attr->size) );\r
- break;\r
- }\r
-\r
- ib_copy_ca_attr( p_ca_attr, p_port_rec->p_ca_attr );\r
-\r
- hdl = al_hdl_lock_insert(\r
- p_context->h_al, p_ca_attr, AL_OBJ_TYPE_H_CA_ATTR );\r
-\r
- if( hdl == AL_INVALID_HANDLE )\r
- {\r
- cl_free( p_ca_attr );\r
- break;\r
- }\r
-\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.ca_guid =\r
- p_port_rec->p_ca_attr->ca_guid;\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.size =\r
- p_port_rec->p_ca_attr->size;\r
- p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.h_ca_attr = hdl;\r
-\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,\r
- NULL );\r
- break;\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_PROXY_CB );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-proxy_get_ca_attr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_ca_attr_info_ioctl_t *p_ioctl;\r
- ib_ca_attr_t *p_src;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- /* Check the size of the ioctl */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(p_ioctl->out) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("invalid buffer size\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_ca_attr_info_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- p_src = (ib_ca_attr_t*)al_hdl_get(\r
- p_context->h_al, p_ioctl->in.h_ca_attr, AL_OBJ_TYPE_H_CA_ATTR );\r
- if( !p_src )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("invalid attr handle\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- __try\r
- {\r
- ProbeForWrite( p_ioctl->in.p_ca_attr, p_src->size, sizeof(void*) );\r
- ib_copy_ca_attr( p_ioctl->in.p_ca_attr, p_src );\r
- p_ioctl->out.status = IB_SUCCESS;\r
- }\r
- __except(EXCEPTION_EXECUTE_HANDLER)\r
- {\r
- p_ioctl->out.status = IB_INVALID_PERMISSION;\r
- }\r
-\r
- cl_free(p_src);\r
-\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_BIND_SA:\r
- * Get a completion callback record from the queue of CM callback records\r
- */\r
-static cl_status_t\r
-proxy_bind_file(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- IN const uint32_t type )\r
-{\r
- NTSTATUS status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- al_dev_open_context_t *p_context;\r
- ual_bind_file_ioctl_t *p_ioctl;\r
- FILE_OBJECT *p_file_obj;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;\r
-\r
- /* Check the size of the ioctl */\r
- if( !p_context ||\r
- !cl_ioctl_in_buf( h_ioctl ) || cl_ioctl_out_size( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(ual_bind_file_ioctl_t) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("No input buffer, or buffer too small.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
-\r
- status = ObReferenceObjectByHandle( p_ioctl->h_file,\r
- READ_CONTROL, *IoFileObjectType, h_ioctl->RequestorMode,\r
- &p_file_obj, NULL );\r
- if( !NT_SUCCESS(status) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ObReferenceObjectByHandle returned 0x%08X\n", status) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_file_obj->FsContext = p_context;\r
- p_file_obj->FsContext2 = (void*)(ULONG_PTR)type;\r
-\r
- ObDereferenceObject( p_file_obj );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-proxy_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_GET_CM_CB_INFO:\r
- cl_status = proxy_get_cm_cb( h_ioctl );\r
- break;\r
- case UAL_GET_MISC_CB_INFO:\r
- cl_status = proxy_get_misc_cb( h_ioctl );\r
- break;\r
- case UAL_GET_COMP_CB_INFO:\r
- cl_status = proxy_get_comp_cb( h_ioctl );\r
- break;\r
- case UAL_BIND:\r
- cl_status = al_dev_open( h_ioctl );\r
- break;\r
- case UAL_BIND_SA:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_SA_REQ_SVC );\r
- break;\r
- case UAL_BIND_DESTROY:\r
- case UAL_BIND_PNP:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_PNP_MGR );\r
- break;\r
- case UAL_BIND_CM:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_CM );\r
- break;\r
- case UAL_BIND_CQ:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_H_CQ );\r
- break;\r
- case UAL_BIND_MISC:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_AL_MGR );\r
- break;\r
- case UAL_BIND_ND:\r
- cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_NDI );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__proxy_pnp_cb(\r
- IN ib_pnp_rec_t *p_pnp_rec )\r
-{\r
- proxy_pnp_evt_t *p_evt;\r
- uint32_t rec_size;\r
- proxy_pnp_recs_t *p_evt_rec, *p_rec;\r
- IRP *p_irp;\r
- IO_STACK_LOCATION *p_io_stack;\r
- ual_rearm_pnp_ioctl_out_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- uint64_t hdl;\r
- cl_status_t cl_status;\r
- ib_api_status_t ret_status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_rec = (proxy_pnp_recs_t*)p_pnp_rec;\r
-\r
- /*\r
- * If an add event, return error to suppress all further\r
- * events for this target.\r
- */\r
- if( p_pnp_rec->pnp_event & IB_PNP_EVENT_ADD )\r
- ret_status = IB_ERROR;\r
- else\r
- ret_status = IB_SUCCESS;\r
-\r
- p_context = p_pnp_rec->pnp_context;\r
- ASSERT( p_context );\r
-\r
- /* Must take and release mutex to synchronize with registration. */\r
- cl_mutex_acquire( &p_context->pnp_mutex );\r
- cl_mutex_release( &p_context->pnp_mutex );\r
-\r
- p_irp = InterlockedExchangePointer( &p_pnp_rec->h_pnp->p_rearm_irp, NULL );\r
- if( !p_irp )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("No rearm IRP queued for PnP event.\n") );\r
- return ret_status;\r
- }\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- p_context = p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- switch( pnp_get_class( p_pnp_rec->pnp_event ) )\r
- {\r
- case IB_PNP_CA:\r
- if( p_pnp_rec->pnp_event == IB_PNP_CA_REMOVE )\r
- rec_size = sizeof(ib_pnp_ca_rec_t);\r
- else\r
- rec_size = sizeof(ib_pnp_ca_rec_t) + p_rec->ca.p_ca_attr->size;\r
- break;\r
- case IB_PNP_PORT:\r
- if( p_pnp_rec->pnp_event == IB_PNP_PORT_REMOVE )\r
- rec_size = sizeof(ib_pnp_port_rec_t);\r
- else\r
- rec_size = sizeof(ib_pnp_port_rec_t) + p_rec->port.p_ca_attr->size;\r
- break;\r
- case IB_PNP_IOU:\r
- rec_size = sizeof(ib_pnp_iou_rec_t);\r
- break;\r
- case IB_PNP_IOC:\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_IOC_PATH_ADD:\r
- case IB_PNP_IOC_PATH_REMOVE:\r
- rec_size = sizeof( ib_pnp_ioc_path_rec_t);\r
- break;\r
- default:\r
- rec_size = sizeof( ib_pnp_ioc_rec_t ) + (sizeof(ib_svc_entry_t) *\r
- (p_rec->ioc.info.profile.num_svc_entries - 1));\r
- }\r
- break;\r
- default:\r
- /* The REG_COMPLETE event is not associated with any class. */\r
- rec_size = sizeof( ib_pnp_rec_t );\r
- break;\r
- }\r
-\r
- p_evt = cl_zalloc( rec_size + sizeof(proxy_pnp_evt_t) );\r
- if( !p_evt )\r
- return ret_status;\r
-\r
- /* Note that cl_event_init cannot fail in kernel-mode. */\r
- cl_event_init( &p_evt->event, FALSE );\r
-\r
- p_evt->rec_size = rec_size;\r
-\r
- p_evt_rec = (proxy_pnp_recs_t*)(p_evt + 1);\r
-\r
- /* Copy the PnP event data. */\r
- switch( pnp_get_class( p_pnp_rec->pnp_event ) )\r
- {\r
- case IB_PNP_CA:\r
- cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ca_rec_t) );\r
- if( p_pnp_rec->pnp_event == IB_PNP_CA_REMOVE )\r
- {\r
- p_evt_rec->ca.p_ca_attr = NULL;\r
- }\r
- else\r
- {\r
- p_evt_rec->ca.p_ca_attr = (ib_ca_attr_t*)(&p_evt_rec->ca + 1);\r
- ib_copy_ca_attr( p_evt_rec->ca.p_ca_attr, p_rec->ca.p_ca_attr );\r
- }\r
- break;\r
- case IB_PNP_PORT:\r
- cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_port_rec_t) );\r
- if( p_pnp_rec->pnp_event == IB_PNP_PORT_REMOVE )\r
- {\r
- p_evt_rec->port.p_ca_attr = NULL;\r
- p_evt_rec->port.p_port_attr = NULL;\r
- }\r
- else\r
- {\r
- p_evt_rec->port.p_ca_attr = (ib_ca_attr_t*)(&p_evt_rec->port + 1);\r
- ib_copy_ca_attr(\r
- p_evt_rec->port.p_ca_attr, p_rec->port.p_ca_attr );\r
- p_evt_rec->port.p_port_attr = &p_evt_rec->port.p_ca_attr->\r
- p_port_attr[p_rec->port.p_port_attr->port_num - 1];\r
- }\r
- break;\r
- case IB_PNP_IOU:\r
- cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_iou_rec_t) );\r
- break;\r
- case IB_PNP_IOC:\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_IOC_PATH_ADD:\r
- case IB_PNP_IOC_PATH_REMOVE:\r
- cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ioc_path_rec_t) );\r
- break;\r
- default:\r
- cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ioc_rec_t) );\r
- }\r
- break;\r
- default:\r
- p_evt_rec->pnp = *p_pnp_rec;\r
- break;\r
- }\r
-\r
- p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t)p_pnp_rec->h_pnp->obj.hdl;\r
- p_pnp_rec->h_pnp->obj.hdl_valid = TRUE;\r
-\r
- hdl =\r
- al_hdl_lock_insert( p_context->h_al, p_evt, AL_OBJ_TYPE_H_PNP_EVENT );\r
- if( hdl == AL_INVALID_HANDLE )\r
- {\r
- cl_free( p_evt );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to insert PnP event in handle map.\n") );\r
- return ret_status;\r
- }\r
-\r
- p_ioctl = cl_ioctl_out_buf( p_irp );\r
- p_ioctl->evt_hdl = hdl;\r
- p_ioctl->evt_size = rec_size;\r
-\r
- /* Hold callback lock to synchronize with registration. */\r
- cl_spinlock_acquire( &p_context->cb_lock );\r
- p_irp->IoStatus.Status = STATUS_SUCCESS;\r
- p_irp->IoStatus.Information = sizeof(ual_rearm_pnp_ioctl_out_t);\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- cl_spinlock_release( &p_context->cb_lock );\r
-\r
- /* Now wait on the event. */\r
- cl_status = cl_event_wait_on( &p_evt->event, PROXY_PNP_TIMEOUT_US, FALSE );\r
- if( cl_status == CL_SUCCESS )\r
- {\r
- /* Update the event context with the user's requested value. */\r
- p_pnp_rec->context = p_evt->evt_context;\r
- /* Forward the user's status. */\r
- ret_status = p_evt->evt_status;\r
- }\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- al_hdl_free( p_context->h_al, hdl );\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- cl_event_destroy( &p_evt->event );\r
- cl_free( p_evt );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return ret_status;\r
-}\r
-\r
-\r
-static void\r
-__cancel_rearm_pnp(\r
- IN DEVICE_OBJECT* p_dev_obj,\r
- IN IRP* p_irp )\r
-{\r
- al_dev_open_context_t *p_context;\r
- PIO_STACK_LOCATION p_io_stack;\r
- uint64_t hdl;\r
- al_pnp_t *h_pnp;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- /* Get the stack location. */\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-\r
- hdl = (size_t)InterlockedExchangePointer(\r
- &p_irp->Tail.Overlay.DriverContext[0], NULL );\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
- h_pnp = (al_pnp_t*)\r
- al_hdl_ref( p_context->h_al, hdl, AL_OBJ_TYPE_H_PNP );\r
- if( h_pnp )\r
- {\r
- if( InterlockedExchangePointer( &h_pnp->p_rearm_irp, NULL ) ==\r
- p_irp )\r
- {\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = STATUS_CANCELLED;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
- }\r
- deref_al_obj( &h_pnp->obj );\r
- }\r
- }\r
-\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REG_PNP:\r
- */\r
-static cl_status_t\r
-proxy_reg_pnp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- ual_reg_pnp_ioctl_in_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- IO_STACK_LOCATION *p_io_stack;\r
- ib_pnp_req_t pnp_req;\r
- ib_api_status_t status, *p_user_status;\r
- uint64_t *p_user_hdl;\r
- ib_pnp_handle_t h_pnp;\r
- cl_status_t cl_status;\r
- KEVENT *p_sync_event;\r
- NTSTATUS nt_status;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_context = p_open_context;\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_reg_pnp_ioctl_in_t) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_out_t) )\r
- {\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
-\r
- pnp_req.pnp_class = p_ioctl->pnp_class;\r
- pnp_req.pnp_context = p_open_context;\r
- pnp_req.pfn_pnp_cb = __proxy_pnp_cb;\r
-\r
- p_user_status = p_ioctl->p_status;\r
- p_user_hdl = p_ioctl->p_hdl;\r
-\r
- if( pnp_get_flag( p_ioctl->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
- {\r
- nt_status = ObReferenceObjectByHandle( p_ioctl->sync_event,\r
- STANDARD_RIGHTS_ALL, *ExEventObjectType, h_ioctl->RequestorMode,\r
- (PVOID*)&p_sync_event, NULL );\r
- if( !NT_SUCCESS( nt_status ) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid sync event handle\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
- }\r
- else\r
- {\r
- p_sync_event = NULL;\r
- }\r
-\r
- cl_mutex_acquire( &p_context->pnp_mutex );\r
- status = al_reg_pnp( p_context->h_al, &pnp_req, p_sync_event, &h_pnp );\r
- if( status == IB_SUCCESS )\r
- {\r
- CL_ASSERT( h_pnp );\r
- h_pnp->p_rearm_irp = h_ioctl;\r
-\r
- h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)h_pnp->obj.hdl;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, __cancel_rearm_pnp );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( h_ioctl );\r
-\r
- cl_copy_to_user( p_user_hdl, &h_pnp->obj.hdl, sizeof(uint64_t) );\r
-\r
- /* Mark the registration as a user-mode one. */\r
- h_pnp->obj.type |= AL_OBJ_SUBTYPE_UM_EXPORT;\r
- h_pnp->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_pnp->obj );\r
-\r
- cl_status = CL_PENDING;\r
- }\r
- else\r
- {\r
- cl_status = CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_copy_to_user( p_user_status, &status, sizeof(ib_api_status_t) );\r
- cl_mutex_release( &p_context->pnp_mutex );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return cl_status;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REG_PNP:\r
- */\r
-static cl_status_t\r
-proxy_poll_pnp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_poll_pnp_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- proxy_pnp_evt_t *p_evt;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_context = p_open_context;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(uint64_t) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(ib_pnp_rec_t) )\r
- {\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
- CL_ASSERT( cl_ioctl_in_buf( h_ioctl ) == cl_ioctl_out_buf( h_ioctl ) );\r
-\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- p_evt = al_hdl_chk(\r
- p_context->h_al, p_ioctl->in.evt_hdl, AL_OBJ_TYPE_H_PNP_EVENT );\r
- if( p_evt )\r
- {\r
- if( cl_ioctl_out_size( h_ioctl ) < p_evt->rec_size )\r
- {\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Buffer too small!\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_memcpy( &p_ioctl->out.pnp_rec, p_evt + 1, p_evt->rec_size );\r
- *p_ret_bytes = p_evt->rec_size;\r
- }\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REG_PNP:\r
- */\r
-static cl_status_t\r
-proxy_rearm_pnp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- ual_rearm_pnp_ioctl_in_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- IO_STACK_LOCATION *p_io_stack;\r
- proxy_pnp_evt_t *p_evt;\r
- ib_pnp_handle_t h_pnp;\r
- IRP *p_old_irp;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
-\r
- p_context = p_open_context;\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_in_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_out_t) )\r
- {\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
-\r
- h_pnp = (al_pnp_t*)\r
- al_hdl_ref( p_context->h_al, p_ioctl->pnp_hdl, AL_OBJ_TYPE_H_PNP );\r
- if( !h_pnp )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,\r
- ("Invalid PNP handle.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, __cancel_rearm_pnp );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( h_ioctl );\r
- h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)h_pnp->obj.hdl;\r
-\r
- /*\r
- * Update the object context before signalling the event since that value\r
- * is returned by the PnP callback.\r
- */\r
- p_old_irp = InterlockedExchangePointer( &h_pnp->p_rearm_irp, h_ioctl );\r
- if( p_old_irp )\r
- {\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_old_irp, NULL );\r
-#pragma warning(pop)\r
- /* Complete the IRP. */\r
- p_old_irp->IoStatus.Status = STATUS_CANCELLED;\r
- p_old_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_old_irp, IO_NO_INCREMENT );\r
- }\r
-\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- p_evt = al_hdl_chk(\r
- p_context->h_al, p_ioctl->last_evt_hdl, AL_OBJ_TYPE_H_PNP_EVENT );\r
- if( p_evt )\r
- {\r
- p_evt->evt_context = p_ioctl->last_evt_context;\r
- p_evt->evt_status = p_ioctl->last_evt_status;\r
- cl_event_signal( &p_evt->event );\r
- }\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- deref_al_obj( &h_pnp->obj );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_PENDING;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_DEREG_PNP:\r
- */\r
-static cl_status_t\r
-proxy_dereg_pnp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl )\r
-{\r
- uint64_t *p_hdl;\r
- al_dev_open_context_t *p_context;\r
- IO_STACK_LOCATION *p_io_stack;\r
- ib_pnp_handle_t h_pnp;\r
-\r
- AL_ENTER( AL_DBG_PNP );\r
- p_context = p_open_context;\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_dereg_pnp_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_hdl = cl_ioctl_in_buf( h_ioctl );\r
-\r
- h_pnp = (ib_pnp_handle_t)\r
- al_hdl_ref( p_context->h_al, *p_hdl, AL_OBJ_TYPE_H_PNP );\r
- if( !h_pnp )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- h_pnp->p_dereg_irp = h_ioctl;\r
-\r
- IoMarkIrpPending( h_ioctl );\r
-\r
- h_pnp->obj.pfn_destroy( &h_pnp->obj, NULL );\r
-\r
- AL_EXIT( AL_DBG_PNP );\r
- return CL_PENDING;\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-al_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- void *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- CL_ASSERT( h_ioctl && p_ret_bytes );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = p_io_stack->FileObject->FsContext;\r
-\r
- if( !p_context )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_REG_SHMID:\r
- cl_status = proxy_reg_shmid( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_GET_CA_ATTR_INFO:\r
- cl_status = proxy_get_ca_attr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REG_PNP:\r
- cl_status = proxy_reg_pnp( p_context, h_ioctl );\r
- break;\r
- case UAL_POLL_PNP:\r
- cl_status = proxy_poll_pnp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REARM_PNP:\r
- cl_status = proxy_rearm_pnp( p_context, h_ioctl );\r
- break;\r
- case UAL_DEREG_PNP:\r
- cl_status = proxy_dereg_pnp( p_context, h_ioctl );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy_cep.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_proxy_cep.tmh"\r
-#endif\r
-#include "al_cm_cep.h"\r
-#include "al_dev.h"\r
-#include <iba/ib_al_ioctl.h>\r
-#include "al_proxy.h"\r
-#include "al.h"\r
-#include "al_qp.h"\r
-\r
-\r
-static cl_status_t\r
-proxy_create_cep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- void* __ptr64 * p_user_context;\r
- ual_create_cep_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_create_cep_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(void* __ptr64) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_create_cep_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_user_context = cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* We use IRPs as notification mechanism so the callback is NULL. */\r
- p_ioctl->status = al_create_cep( p_context->h_al, NULL,\r
- *p_user_context, &p_ioctl->cid );\r
-\r
- *p_ret_bytes = sizeof(ual_create_cep_ioctl_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static inline void\r
-__complete_get_event_ioctl(\r
- IN ib_al_handle_t h_al,\r
- IN IRP* const p_irp,\r
- IN NTSTATUS status )\r
-{\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
-\r
- /* Complete the IRP. */\r
- p_irp->IoStatus.Status = status;\r
- p_irp->IoStatus.Information = 0;\r
- IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT );\r
-\r
- deref_al_obj( &h_al->obj );\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_destroy_cep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- al_destroy_cep( p_context->h_al,\r
- *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_listen(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_listen_ioctl_t *p_ioctl;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_listen_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_listen_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the private data compare buffer to our kernel copy. */\r
- if( p_ioctl->cep_listen.p_cmp_buf )\r
- p_ioctl->cep_listen.p_cmp_buf = p_ioctl->compare;\r
-\r
- status =\r
- al_cep_listen( p_context->h_al, p_ioctl->cid, &p_ioctl->cep_listen );\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_pre_req(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_req_ioctl_t *p_ioctl;\r
- ib_qp_handle_t h_qp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_req_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(struct _ual_cep_req_ioctl_out);\r
-\r
- p_ioctl->in.cm_req.h_al = p_context->h_al;\r
- p_ioctl->in.cm_req.p_primary_path = &p_ioctl->in.paths[0];\r
- if( p_ioctl->in.cm_req.p_alt_path )\r
- p_ioctl->in.cm_req.p_alt_path = &p_ioctl->in.paths[1];\r
- if( p_ioctl->in.cm_req.p_compare_buffer )\r
- p_ioctl->in.cm_req.p_compare_buffer = p_ioctl->in.compare;\r
- if( p_ioctl->in.cm_req.p_req_pdata )\r
- p_ioctl->in.cm_req.p_req_pdata = p_ioctl->in.pdata;\r
-\r
- /* Get the kernel QP handle. */\r
- h_qp = (ib_qp_handle_t)al_hdl_ref(\r
- p_context->h_al, (uint64_t)p_ioctl->in.cm_req.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- goto done;\r
- }\r
-\r
- p_ioctl->in.cm_req.h_qp = h_qp;\r
-\r
- p_ioctl->out.status = al_cep_pre_req( p_context->h_al, p_ioctl->in.cid,\r
- &p_ioctl->in.cm_req, &p_ioctl->out.init );\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- {\r
-done:\r
- cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_send_req(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_req(\r
- p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_pre_rep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_rep_ioctl_t *p_ioctl;\r
- ib_qp_handle_t h_qp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_rep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(struct _ual_cep_rep_ioctl_out);\r
-\r
- if( p_ioctl->in.cm_rep.p_rep_pdata )\r
- p_ioctl->in.cm_rep.p_rep_pdata = p_ioctl->in.pdata;\r
-\r
- /* Get the kernel QP handle. */\r
- h_qp = (ib_qp_handle_t)al_hdl_ref(\r
- p_context->h_al, (uint64_t)p_ioctl->in.cm_rep.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- goto done;\r
- }\r
-\r
- p_ioctl->in.cm_rep.h_qp = h_qp;\r
-\r
- p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid,\r
- p_ioctl->in.context, &p_ioctl->in.cm_rep, &p_ioctl->out.init );\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- {\r
-done:\r
- cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_send_rep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_rep(\r
- p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_get_rtr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_get_rtr_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_get_rtr_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rtr_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(ual_cep_get_rtr_ioctl_t);\r
-\r
- p_ioctl->status = al_cep_get_rtr_attr( p_context->h_al,\r
- *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rtr );\r
-\r
- if( p_ioctl->status != IB_SUCCESS )\r
- cl_memclr( &p_ioctl->rtr, sizeof(ib_qp_mod_t) );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_get_rts(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_get_rts_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_get_rts_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rts_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(ual_cep_get_rts_ioctl_t);\r
-\r
- p_ioctl->status = al_cep_get_rts_attr( p_context->h_al,\r
- *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rts );\r
-\r
- if( p_ioctl->status != IB_SUCCESS )\r
- cl_memclr( &p_ioctl->rts, sizeof(ib_qp_mod_t) );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_rtu(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_rtu_ioctl_t *p_ioctl;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_rtu_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rtu_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- status = al_cep_rtu( p_context->h_al,\r
- p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_rej(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_rej_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_rej_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rej_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_rej(\r
- p_context->h_al, p_ioctl->cid, p_ioctl->rej_status, p_ioctl->ari,\r
- p_ioctl->ari_len, p_ioctl->pdata, p_ioctl->pdata_len );\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_mra(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_mra_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_mra_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_mra_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl->cm_mra.p_mra_pdata = p_ioctl->pdata;\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_mra(\r
- p_context->h_al, p_ioctl->cid, &p_ioctl->cm_mra );\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_lap(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_lap_ioctl_t *p_ioctl;\r
- ib_api_status_t status;\r
- ib_qp_handle_t h_qp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_lap_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_lap_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- p_ioctl->cm_lap.p_alt_path = &p_ioctl->alt_path;\r
- if( p_ioctl->cm_lap.p_lap_pdata )\r
- p_ioctl->pdata;\r
-\r
- /* Get the kernel QP handle. */\r
- h_qp = (ib_qp_handle_t)al_hdl_ref(\r
- p_context->h_al, (uint64_t)p_ioctl->cm_lap.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- status = IB_INVALID_QP_HANDLE;\r
- goto done;\r
- }\r
-\r
- p_ioctl->cm_lap.h_qp = h_qp;\r
-\r
- status = al_cep_lap( p_context->h_al, p_ioctl->cid, &p_ioctl->cm_lap );\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
-done:\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_pre_apr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_apr_ioctl_t *p_ioctl;\r
- ib_qp_handle_t h_qp;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_apr_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(struct _ual_cep_apr_ioctl_out);\r
-\r
- if( p_ioctl->in.cm_apr.p_info )\r
- p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_ioctl->in.apr_info;\r
- if( p_ioctl->in.cm_apr.p_apr_pdata )\r
- p_ioctl->in.cm_apr.p_apr_pdata = p_ioctl->in.pdata;\r
-\r
- /* Get the kernel QP handle. */\r
- h_qp = (ib_qp_handle_t)al_hdl_ref(\r
- p_context->h_al, (uint64_t)p_ioctl->in.cm_apr.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- goto done;\r
- }\r
-\r
- p_ioctl->in.cm_apr.h_qp = h_qp;\r
-\r
- p_ioctl->out.status = al_cep_pre_apr( p_context->h_al, p_ioctl->in.cid,\r
- &p_ioctl->in.cm_apr, &p_ioctl->out.apr );\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- {\r
-done:\r
- cl_memclr( &p_ioctl->out.apr, sizeof(ib_qp_mod_t) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_send_apr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_apr(\r
- p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_dreq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_dreq_ioctl_t *p_ioctl;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_dreq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_dreq_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the private data compare buffer to our kernel copy. */\r
- status = al_cep_dreq( p_context->h_al,\r
- p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len );\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status;\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_drep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_drep_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_drep_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_drep_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl->cm_drep.p_drep_pdata = p_ioctl->pdata;\r
-\r
- (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_drep(\r
- p_context->h_al, p_ioctl->cid, &p_ioctl->cm_drep );\r
-\r
- *p_ret_bytes = sizeof(ib_api_status_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_get_timewait(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_get_timewait_ioctl_t *p_ioctl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_get_timewait_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_timewait_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl->status = al_cep_get_timewait( p_context->h_al,\r
- *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->timewait_us );\r
-\r
- *p_ret_bytes = sizeof(ual_cep_get_timewait_ioctl_t);\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cep_poll(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- al_dev_open_context_t *p_context;\r
- ual_cep_poll_ioctl_t *p_ioctl;\r
- ib_mad_element_t *p_mad = NULL;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
- p_ioctl = (ual_cep_poll_ioctl_t*)cl_ioctl_out_buf( h_ioctl );\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_poll_ioctl_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- *p_ret_bytes = sizeof(ual_cep_poll_ioctl_t);\r
-\r
- p_ioctl->status = al_cep_poll( p_context->h_al,\r
- *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->context,\r
- &p_ioctl->new_cid, &p_mad );\r
-\r
- if( p_ioctl->status == IB_SUCCESS )\r
- {\r
- /* Copy the MAD for user consumption and free the it. */\r
- CL_ASSERT( p_mad );\r
- p_ioctl->element = *p_mad;\r
- if( p_mad->grh_valid )\r
- p_ioctl->grh = *p_mad->p_grh;\r
- else\r
- cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) );\r
- cl_memcpy( p_ioctl->mad_buf, p_mad->p_mad_buf, MAD_BLOCK_SIZE );\r
- ib_put_mad( p_mad );\r
- }\r
- else\r
- {\r
- cl_memclr( &p_ioctl->mad_buf, sizeof(MAD_BLOCK_SIZE) );\r
- p_ioctl->new_cid = AL_INVALID_CID;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
/*
 * IOCTL handler: queue the caller's IRP on a CEP so it completes when the
 * next CEP event arrives.  Returns CL_PENDING when the IRP was successfully
 * queued (the IRP is completed later by the CEP manager); any validation
 * failure or unknown CID returns CL_INVALID_PARAMETER.
 */
static cl_status_t
proxy_cep_get_event(
	IN		void					*p_open_context,
	IN		cl_ioctl_handle_t		h_ioctl,
		OUT	size_t					*p_ret_bytes )
{
	NTSTATUS			status;
	IO_STACK_LOCATION	*p_io_stack;
	al_dev_open_context_t		*p_context;
	net32_t				cid;

	AL_ENTER( AL_DBG_CM );

	/* No synchronous payload - the IRP completes asynchronously. */
	UNUSED_PARAM( p_ret_bytes );

	p_context = p_open_context;

	/* Only file objects opened as CM endpoints may wait for CEP events. */
	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("Invalid file object type for request: %016I64x\n",
			(LONG_PTR)p_io_stack->FileObject->FsContext2) );
		return CL_INVALID_PARAMETER;
	}

	/* Check the size of the ioctl */
	if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid IOCTL input buffer.\n") );
		return CL_INVALID_PARAMETER;
	}

	cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl );

	/* STATUS_PENDING means the IRP is now owned by the CEP; anything else
	 * indicates the CID did not resolve to a live CEP. */
	status = al_cep_queue_irp( p_context->h_al, cid, h_ioctl );
	if( status != STATUS_PENDING )
	{
		/* Invalid CID. Complete the request. */
		AL_EXIT( AL_DBG_CM );
		return CL_INVALID_PARAMETER;
	}

	AL_EXIT( AL_DBG_CM );
	return CL_PENDING;
}
-\r
-\r
-static cl_status_t\r
-proxy_cep_get_req_cid(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- al_dev_open_context_t *p_context;\r
- UNUSED_PARAM(p_ret_bytes);\r
- \r
- AL_ENTER( AL_DBG_CM );\r
-\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate user parameters. */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(uint32_t) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(uint32_t) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* get CID */\r
- cl_status = al_cep_get_cid( p_context->h_al, \r
- *(uint32_t*)cl_ioctl_in_buf( h_ioctl ), h_ioctl );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return cl_status;\r
-}\r
-\r
-\r
-\r
/*
 * IOCTL handler: fetch the stored connection private data for a CEP.
 * The CEP is identified either directly by CID, or indirectly through a
 * user QP handle (in which case the CID is read from the kernel QP's
 * connection state).
 */
static cl_status_t
proxy_cep_get_pdata(
	IN		void					*p_open_context,
	IN		cl_ioctl_handle_t		h_ioctl,
		OUT	size_t					*p_ret_bytes )
{
	al_dev_open_context_t	*p_context;
	ual_cep_get_pdata_ioctl_t	*p_ioctl;
	ib_qp_handle_t			h_qp = NULL;
	net32_t					cid;
	ib_api_status_t			status;
	cl_status_t				cl_status;

	AL_ENTER( AL_DBG_CM );

	p_context = (al_dev_open_context_t*)p_open_context;
	p_ioctl = (ual_cep_get_pdata_ioctl_t*)cl_ioctl_in_buf( h_ioctl );

	/* Validate user parameters. */
	if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_get_pdata_ioctl_in) ||
		cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_get_pdata_ioctl_out) )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, 
			("Incorrect sizes: in %d, out %d (expected - %d, %d)\n",
			cl_ioctl_in_size( h_ioctl ), cl_ioctl_out_size( h_ioctl ), 
			sizeof(struct _ual_cep_get_pdata_ioctl_in),
			sizeof(struct _ual_cep_get_pdata_ioctl_out) ) );
		return CL_INVALID_PARAMETER;
	}

	/* Default return size; overwritten with pdata_len on success below. */
	*p_ret_bytes = sizeof(struct _ual_cep_get_pdata_ioctl_out);

	if ( p_ioctl->in.h_qp )
	{
		/* Get the kernel QP handle. */
		h_qp = (ib_qp_handle_t)al_hdl_ref(
			p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );
		if( !h_qp )
		{
			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, 
				("Invalid QP handle\n"));
			return CL_CONNECTION_INVALID;
		}
		/* The connection-oriented QP carries its CEP's CID. */
		cid = ((al_conn_qp_t*)h_qp)->cid;
	}
	else
	{
		cid = p_ioctl->in.cid;
	}

	/* In/out length: holds the buffer capacity on entry, the actual
	 * pdata length on return.
	 * NOTE(review): the (uint8_t*)&pdata_len cast implies al_cep_get_pdata
	 * takes a uint8_t* length - if out.pdata_len is wider than one byte
	 * this only works on little-endian; confirm against al_cm_cep.h. */
	p_ioctl->out.pdata_len = sizeof(p_ioctl->out.pdata);
	status = al_cep_get_pdata( p_context->h_al, cid, 
		(uint8_t*)&p_ioctl->out.pdata_len, p_ioctl->out.pdata );

	if ( status == IB_SUCCESS )
	{
		cl_status = CL_SUCCESS;
		/* NOTE(review): returns only pdata_len bytes, not the whole out
		 * struct - callers must expect a truncated response; verify the
		 * user-mode counterpart agrees with this layout. */
		*p_ret_bytes = p_ioctl->out.pdata_len;
		AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CM ,
			("proxy_cep_get_pdata: get %d of pdata \n", (int)*p_ret_bytes ));
	}
	else
	{
		cl_status = CL_CONNECTION_INVALID;
	}

	/* Release the reference taken by al_hdl_ref, if any. */
	if ( h_qp )
		deref_al_obj( &h_qp->obj );

	AL_EXIT( AL_DBG_CM );
	return cl_status;
}
-\r
-cl_status_t cep_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- void *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- CL_ASSERT( h_ioctl && p_ret_bytes );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = p_io_stack->FileObject->FsContext;\r
-\r
- if( !p_context )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_CREATE_CEP:\r
- cl_status = proxy_create_cep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_CEP:\r
- cl_status = proxy_destroy_cep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_LISTEN:\r
- cl_status = proxy_cep_listen( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_PRE_REQ:\r
- cl_status = proxy_cep_pre_req( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_SEND_REQ:\r
- cl_status = proxy_cep_send_req( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_PRE_REP:\r
- cl_status = proxy_cep_pre_rep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_SEND_REP:\r
- cl_status = proxy_cep_send_rep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_RTR:\r
- cl_status = proxy_cep_get_rtr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_RTS:\r
- cl_status = proxy_cep_get_rts( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_RTU:\r
- cl_status = proxy_cep_rtu( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_REJ:\r
- cl_status = proxy_cep_rej( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_MRA:\r
- cl_status = proxy_cep_mra( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_LAP:\r
- cl_status = proxy_cep_lap( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_PRE_APR:\r
- cl_status = proxy_cep_pre_apr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_SEND_APR:\r
- cl_status = proxy_cep_send_apr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_DREQ:\r
- cl_status = proxy_cep_dreq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_DREP:\r
- cl_status = proxy_cep_drep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_TIMEWAIT:\r
- cl_status = proxy_cep_get_timewait( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_EVENT:\r
- cl_status = proxy_cep_get_event( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_POLL:\r
- cl_status = proxy_cep_poll( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_REQ_CID:\r
- cl_status = proxy_cep_get_req_cid( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CEP_GET_PDATA:\r
- cl_status = proxy_cep_get_pdata( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy_cm.c 381 2006-06-08 17:10:14Z ftillier $\r
- */\r
-\r
-\r
-#include <complib/comp_lib.h>\r
-#include <iba/ib_al.h>\r
-#include <iba/ib_al_ioctl.h>\r
-#include "al.h"\r
-#include "al_debug.h"\r
-#include "al_dev.h"\r
-#include "al_cm_cep.h"\r
-#include "al_qp.h"\r
-#include "al_proxy.h"\r
-\r
-\r
-\r
-/*\r
- * Process a received CM REQ message.\r
- */\r
/*
 * Process a received CM REQ message.
 *
 * Marshals the REQ record (plus the RTR/RTS QP-modify attributes and the
 * private data) into a callback buffer for the user-mode proxy.  The
 * kernel CM handle is inserted into the AL handle map so user mode can
 * refer to it; if the device is closing (or the insert fails) the
 * connection is rejected with IB_REJ_TIMEOUT instead.
 */
void proxy_cm_req_cb(
	IN		ib_cm_req_rec_t			*p_cm_req_rec )
{
	cm_cb_ioctl_info_t		cb_info;
	struct _cm_req_cb_ioctl_rec	*p_ioctl_rec;	/* short-cut ptr to CM req */
	al_dev_open_context_t	*p_context;
	uint8_t					*dest;
	uint64_t				hdl;

	AL_ENTER( AL_DBG_CM );

	cl_memclr(&cb_info, sizeof(cb_info));

	cb_info.rec_type = CM_REQ_REC;
	p_ioctl_rec = &cb_info.ioctl_rec.cm_req_cb_ioctl_rec;
	cl_memcpy(&p_ioctl_rec->req_rec, p_cm_req_rec, sizeof(ib_cm_req_rec_t));

	/* set up the context to be returned to user */
	if( p_cm_req_rec->h_cm_listen )
	{
		/* Expose the listen object's proxy handle to user mode. */
		p_cm_req_rec->h_cm_listen->obj.hdl_valid = TRUE;
		p_ioctl_rec->req_rec.h_cm_listen =
			(ib_listen_handle_t)p_cm_req_rec->h_cm_listen->obj.hdl;
	}

	p_context = p_cm_req_rec->h_cm_req->h_al->p_context;

	CL_ASSERT(p_context);

	/* Copy the necessary QP attributes to the user. */
	cl_memcpy( &p_ioctl_rec->qp_mod_rtr,
		&p_cm_req_rec->h_cm_req->p_req_info->qp_mod_rtr,
		sizeof( ib_qp_mod_t ) );
	cl_memcpy( &p_ioctl_rec->qp_mod_rts,
		&p_cm_req_rec->h_cm_req->p_req_info->qp_mod_rts,
		sizeof( ib_qp_mod_t ) );
	/* Give user mode the full retry window plus a 2-second margin. */
	p_ioctl_rec->timeout_ms = p_cm_req_rec->h_cm_req->retry_timeout *
		p_cm_req_rec->h_cm_req->max_cm_retries + 2000;

	/* SIDR (UD) requests carry a different private-data size/field. */
	if( p_cm_req_rec->qp_type == IB_QPT_UNRELIABLE_DGRM)
	{
		dest = (uint8_t*)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;
		cl_memcpy( dest,p_cm_req_rec->p_req_pdata, IB_SIDR_REQ_PDATA_SIZE );
	}
	else
	{
		dest = (uint8_t*)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;
		cl_memcpy( dest,p_cm_req_rec->p_req_pdata, IB_REQ_PDATA_SIZE );
	}

	/* The kernel pointer is meaningless in user mode. */
	p_ioctl_rec->req_rec.p_req_pdata = NULL;

	/*
	 * If we're already closing the device - do not queue a callback, since
	 * we're cleaning up the callback lists.
	 */
	if( proxy_context_ref( p_context ) )
	{
		hdl = al_hdl_lock_insert( p_context->h_al, p_cm_req_rec->h_cm_req,
			AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_REQ );
	}
	else
	{
		hdl = AL_INVALID_HANDLE;
	}

	if( hdl == AL_INVALID_HANDLE )
	{
		ib_cm_rej_t			cm_rej;

		/* Reject the request. */
		cl_memclr( &cm_rej, sizeof( ib_cm_rej_t ) );
		cm_rej.rej_status = IB_REJ_TIMEOUT;
		ib_cm_rej( p_cm_req_rec->h_cm_req, &cm_rej );
	}
	else
	{
		p_ioctl_rec->req_rec.h_cm_req = (ib_cm_handle_t)hdl;
		/* TODO: handle failure. */
		proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, &cb_info, NULL );
	}
	/* NOTE(review): deref is unconditional, even when proxy_context_ref()
	 * returned FALSE - this matches proxy_cm_rep_cb; confirm the ref/deref
	 * contract of proxy_context_ref before changing. */
	proxy_context_deref( p_context );
	AL_EXIT( AL_DBG_CM );
}
-\r
-\r
-\r
-void proxy_cm_dreq_cb(\r
- IN ib_cm_dreq_rec_t *p_cm_dreq_rec )\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
- uint64_t hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- cl_memclr(&cb_info, sizeof(cb_info));\r
- cb_info.rec_type = CM_DREQ_REC;\r
- p_ioctl_rec = &cb_info.ioctl_rec.cm_dreq_cb_ioctl_rec;\r
- cl_memcpy(&p_ioctl_rec->dreq_rec, p_cm_dreq_rec, sizeof(ib_cm_dreq_rec_t));\r
-\r
- p_context = p_cm_dreq_rec->h_cm_dreq->h_al->p_context;\r
-\r
- cl_memcpy( &p_ioctl_rec->dreq_pdata, p_cm_dreq_rec->p_dreq_pdata,\r
- IB_DREQ_PDATA_SIZE );\r
- p_ioctl_rec->dreq_rec.p_dreq_pdata = NULL;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- hdl = al_hdl_lock_insert( p_context->h_al, p_cm_dreq_rec->h_cm_dreq,\r
- AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_DREQ );\r
- }\r
- else\r
- {\r
- hdl = AL_INVALID_HANDLE;\r
- }\r
-\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
- p_ioctl_rec->dreq_rec.h_cm_dreq = (ib_cm_handle_t)hdl;\r
- if( !proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_dreq_rec->h_cm_dreq->h_qp->obj ) )\r
- {\r
- /* Remove handle from map. */\r
- al_hdl_get_conn( p_context->h_al, hdl, AL_OBJ_SUBTYPE_DREQ );\r
-\r
- goto err;\r
- }\r
- }\r
- else\r
- {\r
- ib_cm_drep_t cm_drep;\r
-err:\r
-\r
- /* Send a drep. */\r
- cl_memclr( &cm_drep, sizeof( ib_cm_drep_t ) );\r
- ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &cm_drep );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void\r
-proxy_cm_rtu_cb(\r
- IN ib_cm_rtu_rec_t *p_cm_rtu_rec )\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- cl_memclr(&cb_info, sizeof(cb_info));\r
- cb_info.rec_type = CM_RTU_REC;\r
- p_ioctl_rec = &cb_info.ioctl_rec.cm_rtu_cb_ioctl_rec;\r
- cl_memcpy(&p_ioctl_rec->rtu_rec, p_cm_rtu_rec, sizeof(ib_cm_rtu_rec_t));\r
-\r
- CL_ASSERT( p_cm_rtu_rec->h_qp );\r
- p_context = p_cm_rtu_rec->h_qp->obj.h_al->p_context;\r
- p_ioctl_rec->rtu_rec.h_qp = (ib_qp_handle_t)p_cm_rtu_rec->h_qp->obj.hdl;\r
-\r
- cl_memcpy( &p_ioctl_rec->rtu_pdata,\r
- p_cm_rtu_rec->p_rtu_pdata, IB_RTU_PDATA_SIZE );\r
- p_ioctl_rec->rtu_rec.p_rtu_pdata = NULL;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_rtu_rec->h_qp->obj );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
/*
 * Process a received CM REP (connection reply).
 *
 * For connected QP types the kernel CM handle is inserted into the AL
 * handle map and the RTR/RTS QP-modify attributes are copied out; on any
 * failure the connection is rejected with IB_REJ_TIMEOUT.  For SIDR (UD)
 * replies no handle or QP attributes are needed.  The record is then
 * queued to the user-mode proxy.
 */
void proxy_cm_rep_cb(
	IN		ib_cm_rep_rec_t			*p_cm_rep_rec )
{
	cm_cb_ioctl_info_t		cb_info;
	struct _cm_rep_cb_ioctl_rec	*p_ioctl_rec;
	al_dev_open_context_t	*p_context;
	uint8_t					*p_dest;
	uint64_t				hdl;
	al_obj_t				*p_al_obj;

	AL_ENTER( AL_DBG_CM );

	cl_memclr(&cb_info, sizeof(cb_info));
	cb_info.rec_type = CM_REP_REC;
	p_ioctl_rec = &cb_info.ioctl_rec.cm_rep_cb_ioctl_rec;
	p_ioctl_rec->rep_rec = *p_cm_rep_rec;

	/* SIDR (UD) replies carry a different private-data size/field. */
	if( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM )
	{
		p_dest = (uint8_t*)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;
		cl_memcpy( p_dest, p_cm_rep_rec->p_rep_pdata,
			IB_SIDR_REP_PDATA_SIZE );
	}
	else
	{
		p_dest = (uint8_t*)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;
		cl_memcpy( p_dest, p_cm_rep_rec->p_rep_pdata,
			IB_REP_PDATA_SIZE );
	}

	/* The kernel pointer is meaningless in user mode. */
	p_ioctl_rec->rep_rec.p_rep_pdata = NULL;

	CL_ASSERT( p_cm_rep_rec->h_cm_rep );
	p_context = p_cm_rep_rec->h_cm_rep->h_al->p_context;

	if( p_cm_rep_rec->qp_type == IB_QPT_RELIABLE_CONN ||
		p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_CONN )
	{
		CL_ASSERT( p_cm_rep_rec->h_cm_rep->h_qp );
		p_al_obj = &p_cm_rep_rec->h_cm_rep->h_qp->obj;

		/* Copy the necessary QP attributes to the user. */
		cl_memcpy( &p_ioctl_rec->qp_mod_rtr,
			&p_cm_rep_rec->h_cm_rep->p_req_info->qp_mod_rtr,
			sizeof( ib_qp_mod_t ) );
		cl_memcpy( &p_ioctl_rec->qp_mod_rts,
			&p_cm_rep_rec->h_cm_rep->p_req_info->qp_mod_rts,
			sizeof( ib_qp_mod_t ) );

		/*
		 * If we're already closing the device - do not queue a callback, since
		 * we're cleaning up the callback lists.
		 */
		if( proxy_context_ref( p_context ) )
		{
			hdl = al_hdl_lock_insert( p_context->h_al, p_cm_rep_rec->h_cm_rep,
				AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_REP );
		}
		else
		{
			hdl = AL_INVALID_HANDLE;
		}

		if( hdl == AL_INVALID_HANDLE )
		{
			ib_cm_rej_t			cm_rej;

			/* Reject the connection. */
			/* NOTE(review): deref even when ref may have failed - matches
			 * proxy_cm_req_cb; confirm proxy_context_ref's contract. */
			proxy_context_deref( p_context );
			cl_memclr( &cm_rej, sizeof( ib_cm_rej_t ) );
			cm_rej.rej_status = IB_REJ_TIMEOUT;
			ib_cm_rej( p_cm_rep_rec->h_cm_rep, &cm_rej );
			return;
		}
		p_ioctl_rec->rep_rec.h_cm_rep = (ib_cm_handle_t)hdl;
	}
	else
	{
		CL_ASSERT( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM );

		/* SIDR replies have no QP object to key the callback on. */
		p_al_obj = NULL;

		if( !proxy_context_ref( p_context ) )
		{
			proxy_context_deref( p_context );
			return;
		}
		p_ioctl_rec->rep_rec.h_cm_rep = NULL;
	}

	proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, &cb_info, p_al_obj );
	proxy_context_deref( p_context );
	AL_EXIT( AL_DBG_CM );
}
-\r
-\r
-\r
-void proxy_cm_drep_cb(\r
- IN ib_cm_drep_rec_t *p_cm_drep_rec)\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
- &cb_info.ioctl_rec.cm_drep_cb_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
- cl_memclr(&cb_info, sizeof(cb_info));\r
- cb_info.rec_type = CM_DREP_REC;\r
- p_ioctl_rec->drep_rec = *p_cm_drep_rec;\r
-\r
- if( p_cm_drep_rec->p_drep_pdata )\r
- {\r
- cl_memcpy( &p_ioctl_rec->drep_pdata, p_cm_drep_rec->p_drep_pdata,\r
- IB_DREP_PDATA_SIZE );\r
- p_ioctl_rec->drep_rec.p_drep_pdata = NULL;\r
- }\r
-\r
- CL_ASSERT( p_cm_drep_rec->h_qp );\r
- p_context = p_cm_drep_rec->h_qp->obj.h_al->p_context;\r
- p_ioctl_rec->drep_rec.h_qp = (ib_qp_handle_t)p_cm_drep_rec->h_qp->obj.hdl;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_drep_rec->h_qp->obj );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void proxy_cm_mra_cb(\r
- IN ib_cm_mra_rec_t *p_cm_mra_rec)\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
- &cb_info.ioctl_rec.cm_mra_cb_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- cl_memclr( &cb_info, sizeof(cb_info) );\r
- cb_info.rec_type = CM_MRA_REC;\r
- p_ioctl_rec->mra_rec = *p_cm_mra_rec;\r
- \r
- cl_memcpy( &p_ioctl_rec->mra_pdata, p_cm_mra_rec->p_mra_pdata,\r
- IB_MRA_PDATA_SIZE );\r
- p_ioctl_rec->mra_rec.p_mra_pdata = NULL;\r
-\r
- CL_ASSERT( p_cm_mra_rec->h_qp );\r
- p_ioctl_rec->mra_rec.h_qp = (ib_qp_handle_t)p_cm_mra_rec->h_qp->obj.hdl;\r
- p_context = p_cm_mra_rec->h_qp->obj.h_al->p_context;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_mra_rec->h_qp->obj );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void proxy_cm_rej_cb(\r
- IN ib_cm_rej_rec_t *p_cm_rej_rec)\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
- &cb_info.ioctl_rec.cm_rej_cb_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- cl_memclr(&cb_info, sizeof(cb_info));\r
- cb_info.rec_type = CM_REJ_REC;\r
- p_ioctl_rec->rej_rec = *p_cm_rej_rec;\r
-\r
- if( p_cm_rej_rec->p_rej_pdata )\r
- {\r
- cl_memcpy( &p_ioctl_rec->rej_pdata, p_cm_rej_rec->p_rej_pdata,\r
- IB_REJ_PDATA_SIZE );\r
- p_ioctl_rec->rej_rec.p_rej_pdata = (uint8_t*)&p_ioctl_rec->rej_pdata;\r
- }\r
-\r
- if( p_cm_rej_rec->ari_length > 0 )\r
- {\r
- cl_memcpy( &p_ioctl_rec->ari_pdata, p_cm_rej_rec->p_ari,\r
- p_cm_rej_rec->ari_length );\r
- p_ioctl_rec->rej_rec.p_ari = (uint8_t*)&p_ioctl_rec->ari_pdata;\r
- }\r
- else\r
- {\r
- p_ioctl_rec->rej_rec.p_ari = NULL;\r
- }\r
-\r
- CL_ASSERT( p_cm_rej_rec->h_qp );\r
- p_ioctl_rec->rej_rec.h_qp = (ib_qp_handle_t)p_cm_rej_rec->h_qp->obj.hdl;\r
- p_context = p_cm_rej_rec->h_qp->obj.h_al->p_context;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_rej_rec->h_qp->obj );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void\r
-proxy_cm_lap_cb(\r
- IN ib_cm_lap_rec_t *p_cm_lap_rec)\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
- &cb_info.ioctl_rec.cm_lap_cb_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
- uint64_t hdl;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
- cl_memclr(&cb_info, sizeof(cb_info));\r
- cb_info.rec_type = CM_LAP_REC;\r
- p_ioctl_rec->lap_rec = *p_cm_lap_rec;\r
-\r
- if( p_cm_lap_rec->p_lap_pdata )\r
- {\r
- cl_memcpy( &p_ioctl_rec->lap_pdata, p_cm_lap_rec->p_lap_pdata,\r
- IB_LAP_PDATA_SIZE );\r
- p_ioctl_rec->lap_rec.p_lap_pdata = NULL;\r
- }\r
-\r
- p_context = p_cm_lap_rec->h_cm_lap->h_al->p_context;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- hdl = al_hdl_lock_insert( p_context->h_al, p_cm_lap_rec->h_cm_lap,\r
- AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_LAP );\r
- }\r
- else\r
- {\r
- hdl = AL_INVALID_HANDLE;\r
- }\r
-\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
- p_ioctl_rec->lap_rec.h_cm_lap = (ib_cm_handle_t)hdl;\r
-\r
- if( !proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_lap_rec->h_cm_lap->h_qp->obj ) )\r
- {\r
- al_hdl_get_conn( p_context->h_al, hdl, AL_OBJ_SUBTYPE_LAP );\r
- goto err;\r
- }\r
- }\r
- else\r
- {\r
- ib_cm_apr_t cm_apr;\r
-\r
-err:\r
- /* Reject the LAP. */\r
- cl_memclr( &cm_apr, sizeof( ib_cm_apr_t ) );\r
- cm_apr.apr_status = IB_AP_REJECT;\r
- ib_cm_apr( p_cm_lap_rec->h_cm_lap, &cm_apr );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void\r
-proxy_cm_apr_cb(\r
- IN ib_cm_apr_rec_t *p_cm_apr_rec)\r
-{\r
- cm_cb_ioctl_info_t cb_info;\r
- struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
- &cb_info.ioctl_rec.cm_apr_cb_ioctl_rec;\r
- al_dev_open_context_t *p_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
- cl_memclr( &cb_info, sizeof(cb_info) );\r
- cb_info.rec_type = CM_APR_REC;\r
- p_ioctl_rec->apr_rec = *p_cm_apr_rec;\r
-\r
- if( p_cm_apr_rec->info_length > 0 )\r
- {\r
- cl_memcpy( &p_ioctl_rec->apr_info, p_cm_apr_rec->p_info,\r
- p_cm_apr_rec->info_length );\r
- }\r
-\r
- p_ioctl_rec->apr_rec.p_info = NULL;\r
-\r
- cl_memcpy( &p_ioctl_rec->apr_pdata, p_cm_apr_rec->p_apr_pdata,\r
- IB_APR_PDATA_SIZE );\r
- p_ioctl_rec->apr_rec.p_apr_pdata = NULL;\r
-\r
- p_context = p_cm_apr_rec->h_qp->obj.h_al->p_context;\r
- p_ioctl_rec->apr_rec.h_qp = (ib_qp_handle_t)p_cm_apr_rec->h_qp->obj.hdl;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context,\r
- &cb_info, &p_cm_apr_rec->h_qp->obj );\r
- }\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-\r
-void\r
-proxy_listen_err_cb(\r
- IN ib_listen_err_rec_t *p_err_rec)\r
-{\r
- al_dev_open_context_t *p_context;\r
- misc_cb_ioctl_info_t cb_info;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- p_context = p_err_rec->h_cm_listen->obj.h_al->p_context;\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_CM );\r
- return;\r
- }\r
-\r
- /* Set up context and callback record type appropriate for UAL */\r
- cb_info.rec_type = LISTEN_ERROR_REC;\r
- cb_info.ioctl_rec.listen_err = *p_err_rec;\r
- cb_info.ioctl_rec.listen_err.h_cm_listen =\r
- (ib_listen_handle_t)p_err_rec->h_cm_listen->obj.hdl;\r
-\r
- /* Proxy handle must be valid now. */\r
- if( !p_err_rec->h_cm_listen->obj.hdl_valid )\r
- p_err_rec->h_cm_listen->obj.hdl_valid = TRUE;\r
-\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
- &p_err_rec->h_cm_listen->obj );\r
- proxy_context_deref( p_context );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_req(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_req_ioctl_t *p_ioctl =\r
- (ual_cm_req_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- size_t in_buf_sz;\r
- uint8_t *p_buf;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Extra validation on the input buffer length. */\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_req.p_alt_path )\r
- in_buf_sz += sizeof(ib_path_rec_t);\r
- if( p_ioctl->in.cm_req.p_compare_buffer )\r
- in_buf_sz += p_ioctl->in.cm_req.compare_length;\r
- if( p_ioctl->in.cm_req.p_req_pdata )\r
- in_buf_sz += p_ioctl->in.cm_req.req_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- p_ioctl->in.cm_req.h_al = p_context->h_al;\r
-\r
- /* Validate qp handle */\r
- if( p_ioctl->in.cm_req.qp_type == IB_QPT_RELIABLE_CONN ||\r
- p_ioctl->in.cm_req.qp_type == IB_QPT_UNRELIABLE_CONN )\r
- {\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->in.cm_req.h_qp = h_qp;\r
- }\r
- else\r
- {\r
- h_qp = NULL;\r
- }\r
-\r
- /* Fixup the primary path pointer. */\r
- p_ioctl->in.cm_req.p_primary_path = p_ioctl->in.paths;\r
-\r
- p_buf = (uint8_t*)&p_ioctl->in.paths[1];\r
- /* Fixup the alternat path pointer if needed. */\r
- if( p_ioctl->in.cm_req.p_alt_path )\r
- {\r
- p_ioctl->in.cm_req.p_alt_path = (ib_path_rec_t*)p_buf;\r
- p_buf += sizeof(ib_path_rec_t);\r
- }\r
- /* Fixup the private data buffer pointer as needed. */\r
- if( p_ioctl->in.cm_req.p_req_pdata )\r
- {\r
- p_ioctl->in.cm_req.p_req_pdata = p_buf;\r
- p_buf += p_ioctl->in.cm_req.req_length;\r
- }\r
- /* Fixup the compare buffer pointer as needed. */\r
- if( p_ioctl->in.cm_req.p_compare_buffer )\r
- p_ioctl->in.cm_req.p_compare_buffer = p_buf;\r
-\r
- /* Override the user's callbacks with our own. */\r
- /* Do not change user's request from client/server to peer-to-peer. */ \r
- if( p_ioctl->in.cm_req.pfn_cm_req_cb )\r
- p_ioctl->in.cm_req.pfn_cm_req_cb = proxy_cm_req_cb;\r
- p_ioctl->in.cm_req.pfn_cm_rep_cb = proxy_cm_rep_cb;\r
- p_ioctl->in.cm_req.pfn_cm_mra_cb = proxy_cm_mra_cb;\r
- p_ioctl->in.cm_req.pfn_cm_rej_cb = proxy_cm_rej_cb;\r
-\r
- p_ioctl->out.status = ib_cm_req( &p_ioctl->in.cm_req );\r
-\r
- if( h_qp )\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_rep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_rep_ioctl_t *p_ioctl =\r
- (ual_cm_rep_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm_req;\r
- ib_qp_handle_t h_qp;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_rep.p_rep_pdata )\r
- in_buf_sz += p_ioctl->in.cm_rep.rep_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate QP handle. */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->in.cm_rep.h_qp = h_qp;\r
-\r
- /* Validate CM REQ handle */\r
- h_cm_req = al_hdl_get_conn(\r
- p_context->h_al, p_ioctl->in.h_cm_req, AL_OBJ_SUBTYPE_REQ );\r
- if( !h_cm_req )\r
- {\r
- deref_al_obj( &h_qp->obj );\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
-\r
- if( p_ioctl->in.cm_rep.p_rep_pdata )\r
- p_ioctl->in.cm_rep.p_rep_pdata = (uint8_t*)((&p_ioctl->in.cm_rep) + 1);\r
-\r
- /* All work requests are posted in UM. */\r
- p_ioctl->in.cm_rep.p_recv_wr = NULL;\r
-\r
- p_ioctl->in.cm_rep.pfn_cm_rtu_cb = proxy_cm_rtu_cb;\r
- p_ioctl->in.cm_rep.pfn_cm_lap_cb = proxy_cm_lap_cb;\r
- p_ioctl->in.cm_rep.pfn_cm_dreq_cb = proxy_cm_dreq_cb;\r
-\r
- p_ioctl->out.status = ib_cm_rep( h_cm_req, &p_ioctl->in.cm_rep );\r
-\r
- __deref_conn( h_cm_req );\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_dreq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_dreq_ioctl_t *p_ioctl =\r
- (ual_cm_dreq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_dreq.p_dreq_pdata )\r
- in_buf_sz += p_ioctl->in.cm_dreq.dreq_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate qp handle */\r
- if( p_ioctl->in.cm_dreq.qp_type == IB_QPT_RELIABLE_CONN ||\r
- p_ioctl->in.cm_dreq.qp_type == IB_QPT_UNRELIABLE_CONN )\r
- {\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->in.cm_dreq.h_qp = h_qp;\r
- }\r
- else\r
- {\r
- h_qp = NULL;\r
- }\r
-\r
- if( p_ioctl->in.cm_dreq.p_dreq_pdata )\r
- p_ioctl->in.cm_dreq.p_dreq_pdata = (uint8_t*)((&p_ioctl->in.cm_dreq) + 1);\r
-\r
- p_ioctl->in.cm_dreq.pfn_cm_drep_cb = proxy_cm_drep_cb;\r
-\r
- p_ioctl->out.status = ib_cm_dreq( &p_ioctl->in.cm_dreq );\r
-\r
- if( h_qp )\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_drep(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_drep_ioctl_t *p_ioctl =\r
- (ual_cm_drep_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm_dreq;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_drep.p_drep_pdata )\r
- in_buf_sz += p_ioctl->in.cm_drep.drep_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CM REQ handle. */\r
- h_cm_dreq = al_hdl_get_conn(\r
- p_context->h_al, p_ioctl->in.h_cm_dreq, AL_OBJ_SUBTYPE_DREQ );\r
- if( !h_cm_dreq )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- return CL_SUCCESS;\r
- }\r
-\r
- if( p_ioctl->in.cm_drep.p_drep_pdata )\r
- {\r
- p_ioctl->in.cm_drep.p_drep_pdata =\r
- (uint8_t*)((&p_ioctl->in.cm_drep) + 1);\r
- }\r
-\r
- p_ioctl->out.status = ib_cm_drep( h_cm_dreq, &p_ioctl->in.cm_drep );\r
-\r
- __deref_conn( h_cm_dreq );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_listen(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_listen_ioctl_t *p_ioctl =\r
- (ual_cm_listen_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- ib_listen_handle_t h_listen;\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_listen.p_compare_buffer )\r
- in_buf_sz += p_ioctl->in.cm_listen.compare_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- if( p_ioctl->in.cm_listen.p_compare_buffer )\r
- {\r
- p_ioctl->in.cm_listen.p_compare_buffer =\r
- (uint8_t*)((&p_ioctl->in.cm_listen) + 1);\r
- }\r
-\r
- if( p_ioctl->in.cm_listen.qp_type == IB_QPT_RELIABLE_CONN ||\r
- p_ioctl->in.cm_listen.qp_type == IB_QPT_UNRELIABLE_CONN )\r
- {\r
- p_ioctl->in.cm_listen.pfn_cm_mra_cb = proxy_cm_mra_cb;\r
- p_ioctl->in.cm_listen.pfn_cm_rej_cb = proxy_cm_rej_cb;\r
- }\r
- p_ioctl->in.cm_listen.pfn_cm_req_cb = proxy_cm_req_cb;\r
-\r
- p_ioctl->out.status = cm_listen( p_context->h_al,\r
- &p_ioctl->in.cm_listen, proxy_listen_err_cb, p_ioctl->in.context,\r
- &h_listen );\r
- if( p_ioctl->out.status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_cm_listen = h_listen->obj.hdl;\r
- h_listen->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_listen->obj );\r
- }\r
- else\r
- {\r
- p_ioctl->out.h_cm_listen = AL_INVALID_HANDLE;\r
- }\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_cancel(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_cancel_ioctl_t *p_ioctl =\r
- (ual_cm_cancel_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- ib_listen_handle_t h_cm_listen;\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate listen handle. */\r
- h_cm_listen = (ib_listen_handle_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.h_cm_listen, AL_OBJ_TYPE_H_LISTEN );\r
- if( !h_cm_listen )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- return CL_SUCCESS;\r
- }\r
-\r
- h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, ib_sync_destroy );\r
- p_ioctl->out.status = IB_SUCCESS;\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_rtu(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_rtu_ioctl_t *p_ioctl =\r
- (ual_cm_rtu_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm_rep;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_rtu.p_rtu_pdata )\r
- in_buf_sz += p_ioctl->in.cm_rtu.rtu_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CM REP handle. */\r
- h_cm_rep = al_hdl_get_conn(\r
- p_context->h_al, p_ioctl->in.h_cm_rep, AL_OBJ_SUBTYPE_REP );\r
- if( !h_cm_rep )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
-\r
- if( p_ioctl->in.cm_rtu.p_rtu_pdata )\r
- p_ioctl->in.cm_rtu.p_rtu_pdata = (uint8_t*)((&p_ioctl->in.cm_rtu) + 1);\r
-\r
- p_ioctl->in.cm_rtu.pfn_cm_dreq_cb = proxy_cm_dreq_cb;\r
- p_ioctl->in.cm_rtu.pfn_cm_apr_cb = proxy_cm_apr_cb;\r
-\r
- p_ioctl->out.status = ib_cm_rtu( h_cm_rep, &p_ioctl->in.cm_rtu );\r
-\r
- __deref_conn( h_cm_rep );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_rej(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_rej_ioctl_t *p_ioctl =\r
- (ual_cm_rej_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm;\r
- size_t in_buf_sz;\r
- uint8_t *p_buf;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_rej.p_ari )\r
- in_buf_sz += p_ioctl->in.cm_rej.ari_length;\r
- if( p_ioctl->in.cm_rej.p_rej_pdata )\r
- in_buf_sz += p_ioctl->in.cm_rej.rej_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CM handle. We could reject a request or reply. */\r
- h_cm = al_hdl_get_conn( p_context->h_al, p_ioctl->in.h_cm,\r
- AL_OBJ_SUBTYPE_REQ | AL_OBJ_SUBTYPE_REP );\r
- if( !h_cm )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_buf = (uint8_t*)((&p_ioctl->in.cm_rej) + 1);\r
- if( p_ioctl->in.cm_rej.p_ari )\r
- {\r
- p_ioctl->in.cm_rej.p_ari = (ib_ari_t*)p_buf;\r
- p_buf += p_ioctl->in.cm_rej.ari_length;\r
- }\r
- if( p_ioctl->in.cm_rej.p_rej_pdata )\r
- p_ioctl->in.cm_rej.p_rej_pdata = p_buf;\r
-\r
- p_ioctl->out.status = ib_cm_rej( h_cm, &p_ioctl->in.cm_rej );\r
-\r
- __deref_conn( h_cm );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_mra(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_mra_ioctl_t *p_ioctl =\r
- (ual_cm_mra_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_mra.p_mra_pdata )\r
- in_buf_sz += p_ioctl->in.cm_mra.mra_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CM handle. MRA could be for a REQ, REP, or LAP. */\r
- h_cm = al_hdl_ref_conn( p_context->h_al, p_ioctl->in.h_cm,\r
- AL_OBJ_SUBTYPE_REQ | AL_OBJ_SUBTYPE_REP | AL_OBJ_SUBTYPE_LAP );\r
- if( !h_cm )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
-\r
- if( p_ioctl->in.cm_mra.p_mra_pdata )\r
- p_ioctl->in.cm_mra.p_mra_pdata = (uint8_t*)((&p_ioctl->in.cm_mra) + 1);\r
-\r
- p_ioctl->out.status = ib_cm_mra( h_cm, &p_ioctl->in.cm_mra );\r
-\r
- __deref_conn( h_cm );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_lap(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_lap_ioctl_t *p_ioctl =\r
- (ual_cm_lap_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_lap.p_lap_pdata )\r
- in_buf_sz += p_ioctl->in.cm_lap.lap_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate qp handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->in.cm_lap.h_qp = h_qp;\r
-\r
- if( p_ioctl->in.cm_lap.p_alt_path )\r
- p_ioctl->in.cm_lap.p_alt_path = &p_ioctl->in.alt_path;\r
-\r
- if( p_ioctl->in.cm_lap.p_lap_pdata )\r
- {\r
- p_ioctl->in.cm_lap.p_lap_pdata =\r
- (uint8_t* __ptr64)((&p_ioctl->in.cm_lap.p_lap_pdata) + 1);\r
- }\r
-\r
- p_ioctl->out.status = ib_cm_lap( &p_ioctl->in.cm_lap );\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_apr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cm_apr_ioctl_t *p_ioctl =\r
- (ual_cm_apr_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cm_handle_t h_cm_lap;\r
- ib_qp_handle_t h_qp;\r
- size_t in_buf_sz;\r
- uint8_t *p_buf;\r
-\r
- AL_ENTER( AL_DBG_CM );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- if( p_ioctl->in.cm_apr.p_apr_pdata )\r
- in_buf_sz += p_ioctl->in.cm_apr.apr_length;\r
-\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- h_cm_lap = al_hdl_get_conn(\r
- p_context->h_al, p_ioctl->in.h_cm_lap, AL_OBJ_SUBTYPE_LAP );\r
- if( !h_cm_lap )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* Validate qp handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- __deref_conn( h_cm_lap );\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->in.cm_apr.h_qp = h_qp;\r
-\r
- p_buf = (uint8_t*)((&p_ioctl->in.cm_apr) + 1);\r
- if( p_ioctl->in.cm_apr.p_info )\r
- {\r
- p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_buf;\r
- p_buf += p_ioctl->in.cm_apr.info_length;\r
- }\r
- if( p_ioctl->in.cm_apr.p_apr_pdata )\r
- p_ioctl->in.cm_apr.p_apr_pdata = p_buf;\r
-\r
- p_ioctl->out.status = ib_cm_apr( h_cm_lap, &p_ioctl->in.cm_apr );\r
-\r
- __deref_conn( h_cm_lap );\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_CM );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_cm_force_apm(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- /*\r
- * The force APM path needs to just update the primary path index.\r
- * The actual QP modify needs to happen in UM.\r
- */\r
- UNUSED_PARAM( p_open_context );\r
- UNUSED_PARAM( h_ioctl );\r
- UNUSED_PARAM( p_ret_bytes );\r
- return IB_ERROR;\r
-}\r
-\r
-\r
-cl_status_t\r
-cm_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- void *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- CL_ASSERT( h_ioctl && p_ret_bytes );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = p_io_stack->FileObject->FsContext;\r
-\r
- if( !p_context )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_CM_LISTEN:\r
- cl_status = proxy_cm_listen( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_REQ:\r
- cl_status = proxy_cm_req( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_REP:\r
- cl_status = proxy_cm_rep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_RTU:\r
- cl_status = proxy_cm_rtu( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_REJ:\r
- cl_status = proxy_cm_rej( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_DREQ:\r
- cl_status = proxy_cm_dreq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_DREP:\r
- cl_status = proxy_cm_drep( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_MRA:\r
- cl_status = proxy_cm_mra( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_LAP:\r
- cl_status = proxy_cm_lap( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_APR:\r
- cl_status = proxy_cm_apr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_FORCE_APM:\r
- cl_status = proxy_cm_force_apm( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CM_CANCEL:\r
- cl_status = proxy_cm_cancel( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy_ioc.c 9 2005-05-23 22:38:08Z ftillier $\r
- */\r
-\r
-\r
-#include <complib/comp_lib.h>\r
-#include <iba/ib_al.h>\r
-#include <iba/ib_al_ioctl.h>\r
-#include "al_debug.h"\r
-#include "al_dev.h"\r
-#include "al_proxy.h"\r
-\r
-\r
-cl_status_t\r
-ioc_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
- *
- * This software is available to you under the OpenIB.org BSD license
- * below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * $Id: al_proxy_verbs.c 548 2006-11-27 20:03:51Z leonidk $
- */
-
-
-#include <complib/comp_lib.h>
-#include <iba/ib_al.h>
-#include <iba/ib_al_ioctl.h>
-#include "al.h"
-#include "al_qp.h"
-#include "al_debug.h"
-#include "al_cm_cep.h"
-
-#if defined(EVENT_TRACING)
-#ifdef offsetof
-#undef offsetof
-#endif
-#include "al_proxy_ndi.tmh"
-#endif
-
-#include "al_dev.h"
-/* Get the internal definitions of apis for the proxy */
-#include "al_ca.h"
-#include "ib_common.h"
-#include "al_proxy_ndi.h"
-#include "al_ndi_cm.h"
-
-/*******************************************************************
- *
- * IOCTLS
- *
- ******************************************************************/
-
-/*
- * Process the ioctl UAL_NDI_CREATE_CQ:
- */
-static cl_status_t
-__ndi_create_cq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_create_cq_ioctl_t *p_ioctl =
- (ual_create_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_ca_handle_t h_ca;
- ib_cq_handle_t h_cq;
- ib_cq_create_t cq_create;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
- ib_pfn_event_cb_t pfn_ev;
-
- AL_ENTER( AL_DBG_NDI );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate CA handle */
- h_ca = (ib_ca_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );
- if( !h_ca )
- {
- status = IB_INVALID_CA_HANDLE;
- goto proxy_create_cq_err1;
- }
-
- cq_create.size = p_ioctl->in.size;
-
- /* Override with proxy's cq callback */
- cq_create.pfn_comp_cb = ndi_cq_compl_cb;
- cq_create.h_wait_obj = NULL;
- pfn_ev = ndi_cq_error_cb;
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_create_cq_err2;
-
- status = create_cq( h_ca, &cq_create, p_ioctl->in.context,
- pfn_ev, &h_cq, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_create_cq_err2;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.size = cq_create.size;
- p_ioctl->out.h_cq = h_cq->obj.hdl;
- h_cq->obj.hdl_valid = TRUE;
- deref_al_obj( &h_cq->obj );
- }
- else
- {
- h_cq->obj.pfn_destroy( &h_cq->obj, NULL );
-
-proxy_create_cq_err2:
- cl_waitobj_deref( cq_create.h_wait_obj );
-
-proxy_create_cq_err1:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.h_cq = AL_INVALID_HANDLE;
- p_ioctl->out.size = 0;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_ca )
- deref_al_obj( &h_ca->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return CL_SUCCESS;
-}
-
-
-static cl_status_t
-__ndi_notify_cq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ual_ndi_notify_cq_ioctl_in_t *p_ioctl;
- al_dev_open_context_t *p_context;
- ib_cq_handle_t h_cq;
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
- p_ioctl = (ual_ndi_notify_cq_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_ndi_notify_cq_ioctl_in_t) )
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate CQ handle */
- h_cq = (ib_cq_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->h_cq, AL_OBJ_TYPE_H_CQ );
- if( !h_cq )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* enqueue the IRP (h_cq is referenced in al_hdl_ref) */
- if (p_ioctl->notify_comps)
- IoCsqInsertIrp( &h_cq->compl.csq, h_ioctl, NULL );
- else
- IoCsqInsertIrp( &h_cq->error.csq, h_ioctl, NULL );
-
- cl_status = CL_PENDING;
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-static cl_status_t
-__ndi_cancel_cq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_cq_handle_t h_cq = NULL;
- al_dev_open_context_t *p_context;
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(uint64_t) )
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate CQ handle */
- h_cq = (ib_cq_handle_t)
- al_hdl_ref( p_context->h_al,
- *(uint64_t*)cl_ioctl_in_buf( h_ioctl ), AL_OBJ_TYPE_H_CQ );
- if( !h_cq )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* flush IRP queues */
- ndi_cq_flush_ques( h_cq );
-
- cl_status = CL_SUCCESS;
- deref_al_obj( &h_cq->obj );
-
-exit:
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-static cl_status_t
-__ndi_modify_qp(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_api_status_t status;
- ib_qp_handle_t h_qp = NULL;
- al_dev_open_context_t *p_context;
- ual_ndi_modify_qp_ioctl_in_t *p_req =
- (ual_ndi_modify_qp_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
-
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_ndi_modify_qp_ioctl_in_t))
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_req->h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* Check QP type */
- if( h_qp->type != IB_QPT_RELIABLE_CONN )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* perform the ioctl */
- status = ndi_modify_qp( h_qp, &p_req->qp_mod,
- cl_ioctl_out_size( h_ioctl ), cl_ioctl_out_buf( h_ioctl ) );
- if ( status != IB_SUCCESS )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
- ("ndi_modify_qp returned %s.\n", ib_get_err_str(status) ) );
- cl_status = CL_ERROR;
- }
- else
- {
- cl_status = CL_SUCCESS;
- *p_ret_bytes = cl_ioctl_out_size( h_ioctl );
- }
-
-exit:
- if ( h_qp )
- deref_al_obj( &h_qp->obj );
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-static cl_status_t
-__ndi_req_cm(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_qp_handle_t h_qp = NULL;
- al_dev_open_context_t *p_context;
- ual_ndi_req_cm_ioctl_in_t *p_parm, *p_req =
- (ual_ndi_req_cm_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_ndi_req_cm_ioctl_in_t))
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_req->h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* Check QP type */
- if( h_qp->type != IB_QPT_RELIABLE_CONN )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* Check psize */
- if ( p_req->pdata_size > sizeof(p_req->pdata) )
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* copy request parameters a side to prevent problems from cancelled IRP */
- p_parm = cl_zalloc( sizeof(ual_ndi_req_cm_ioctl_in_t) );
- if (!p_parm )
- {
- cl_status = CL_INSUFFICIENT_MEMORY;
- goto exit;
- }
- RtlCopyMemory( p_parm, p_req, sizeof(ual_ndi_req_cm_ioctl_in_t) );
- p_parm->h_qp = (uint64_t)h_qp;
-
- /* perform the ioctl */
- cl_status = ndi_req_cm( h_qp, h_ioctl, p_parm );
-
-exit:
- if (h_qp)
- deref_al_obj( &h_qp->obj );
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-static cl_status_t
-__ndi_rep_cm(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_qp_handle_t h_qp = NULL;
- al_dev_open_context_t *p_context;
- net32_t cid;
- ual_ndi_rep_cm_ioctl_in_t *p_rep =
- (ual_ndi_rep_cm_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( (cl_ioctl_in_size( h_ioctl ) < sizeof(ual_ndi_rep_cm_ioctl_in_t)) ||
- cl_ioctl_out_size( h_ioctl ) < sizeof(net32_t) )
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Get and validate QP handle */
- h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_rep->h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- if( h_qp->type != IB_QPT_RELIABLE_CONN )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* Check psize */
- if ( p_rep->pdata_size >= sizeof(p_rep->pdata) )
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Get and validate CID */
- cid = p_rep->cid;
-
- /* perform the ioctls */
- cl_status = ndi_rep_cm( h_qp, cid, h_ioctl, p_rep );
-
-exit:
- if (h_qp)
- deref_al_obj( &h_qp->obj );
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-
-static cl_status_t
-__ndi_rej_cm(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- al_dev_open_context_t *p_context;
- net32_t cid;
- ib_api_status_t status;
- ual_ndi_rej_cm_ioctl_in_t *p_rej =
- (ual_ndi_rej_cm_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Check psize */
- if ( p_rej->pdata_size >= sizeof(p_rej->pdata) )
- {
- h_ioctl->IoStatus.Status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Get and validate CID */
- cid = p_rej->cid;
-
- /* perform the ioctl */
- status = al_cep_rej( p_context->h_al, cid, IB_REJ_INSUF_QP,
- NULL, 0, p_rej->pdata, p_rej->pdata_size);
- if (status != IB_SUCCESS)
- {
- h_ioctl->IoStatus.Status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- al_destroy_cep( p_context->h_al, cid, NULL );
- h_ioctl->IoStatus.Status = STATUS_SUCCESS;
-
-exit:
- /* complete the IRP */
- h_ioctl->IoStatus.Information = 0;
- IoCompleteRequest( h_ioctl, IO_NO_INCREMENT );
-
- AL_EXIT( AL_DBG_NDI );
- return CL_COMPLETED;
-}
-
-static cl_status_t
-__ndi_rtu_cm(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_qp_handle_t h_qp = NULL;
- al_dev_open_context_t *p_context;
- ual_ndi_rtu_cm_ioctl_in_t *p_rtu =
- (ual_ndi_rtu_cm_ioctl_in_t*)cl_ioctl_in_buf( h_ioctl );
-
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_ndi_rtu_cm_ioctl_in_t))
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al, p_rtu->h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* Check QP type */
- if( h_qp->type != IB_QPT_RELIABLE_CONN )
- {
- cl_status = CL_INVALID_HANDLE;
- goto exit;
- }
-
- /* perform the ioctl */
- cl_status = ndi_rtu_cm( h_qp, h_ioctl );
-
-exit:
- if (h_qp)
- deref_al_obj( &h_qp->obj );
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-static cl_status_t
-__ndi_dreq_cm(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- ib_qp_handle_t h_qp = NULL;
- al_dev_open_context_t *p_context;
-
- UNUSED_PARAM(p_ret_bytes);
-
- AL_ENTER( AL_DBG_NDI );
-
- p_context = (al_dev_open_context_t*)p_open_context;
-
- /* Validate user parameters. */
- if( cl_ioctl_in_size( h_ioctl ) < sizeof(uint64_t))
- {
- cl_status = CL_INVALID_PARAMETER;
- goto exit;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)al_hdl_ref( p_context->h_al,
- *(uint64_t*)cl_ioctl_in_buf( h_ioctl ), AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- cl_status = CL_CONNECTION_INVALID;
- goto exit;
- }
-
- /* Check QP type */
- if( h_qp->type != IB_QPT_RELIABLE_CONN )
- {
- cl_status = CL_CONNECTION_INVALID;
- goto exit;
- }
-
- /* perform the ioctl */
- cl_status = ndi_dreq_cm( h_qp, h_ioctl );
-
-exit:
- if (h_qp)
- deref_al_obj( &h_qp->obj );
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
-cl_status_t
-ndi_ioctl(
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- cl_status_t cl_status;
- IO_STACK_LOCATION *p_io_stack;
- void *p_context;
-
- AL_ENTER( AL_DBG_NDI );
-
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
- p_context = p_io_stack->FileObject->FsContext;
-
- if( !p_context )
- {
- AL_EXIT( AL_DBG_DEV );
- return CL_INVALID_PARAMETER;
- }
-
- switch( cl_ioctl_ctl_code( h_ioctl ) )
- {
- case UAL_NDI_CREATE_CQ:
- cl_status = __ndi_create_cq( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_NOTIFY_CQ:
- cl_status = __ndi_notify_cq( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_CANCEL_CQ:
- cl_status = __ndi_cancel_cq( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_MODIFY_QP:
- cl_status = __ndi_modify_qp( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_REQ_CM:
- cl_status = __ndi_req_cm( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_REP_CM:
- cl_status = __ndi_rep_cm( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_RTU_CM:
- cl_status = __ndi_rtu_cm( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_REJ_CM:
- cl_status = __ndi_rej_cm( p_context, h_ioctl, p_ret_bytes );
- break;
- case UAL_NDI_DREQ_CM:
- cl_status = __ndi_dreq_cm( p_context, h_ioctl, p_ret_bytes );
- break;
- default:
- cl_status = CL_INVALID_PARAMETER;
- break;
- }
-
- AL_EXIT( AL_DBG_NDI );
- return cl_status;
-}
-
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy_subnet.c 426 2006-07-24 19:18:19Z ftillier $\r
- */\r
-\r
-\r
-#include <complib/comp_lib.h>\r
-#include <iba/ib_al.h>\r
-#include <iba/ib_al_ioctl.h>\r
-\r
-#include "al.h"\r
-#include "al_av.h"\r
-#include "al_ca.h"\r
-#include "al_cq.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_proxy_subnet.tmh"\r
-#endif\r
-#include "al_dev.h"\r
-#include "al_mad_pool.h"\r
-#include "al_mr.h"\r
-#include "al_mw.h"\r
-#include "al_pd.h"\r
-#include "al_qp.h"\r
-#include "ib_common.h"\r
-#include "al_proxy.h"\r
-\r
-\r
-extern ib_pool_handle_t gh_mad_pool;\r
-\r
-\r
-\r
-static\r
-cl_status_t\r
-proxy_reg_svc(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- UNUSED_PARAM( p_open_context );\r
- UNUSED_PARAM( h_ioctl );\r
- UNUSED_PARAM( p_ret_bytes );\r
- return CL_ERROR;\r
-}\r
-static\r
-cl_status_t\r
-proxy_dereg_svc(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- UNUSED_PARAM( p_open_context );\r
- UNUSED_PARAM( h_ioctl );\r
- UNUSED_PARAM( p_ret_bytes );\r
- return CL_ERROR;\r
-}\r
-\r
-\r
-static void\r
-__proxy_sa_req_cb(\r
- IN al_sa_req_t *p_sa_req,\r
- IN ib_mad_element_t *p_mad_response )\r
-{\r
- IRP *p_irp;\r
- IO_STACK_LOCATION *p_io_stack;\r
- ual_send_sa_req_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- uint64_t hdl;\r
-\r
- AL_ENTER( AL_DBG_QUERY );\r
-\r
- p_irp = (IRP*)p_sa_req->user_context;\r
- CL_ASSERT( p_irp );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
- p_ioctl = cl_ioctl_out_buf( p_irp );\r
-\r
- p_context = p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- /* Clear the pointer to the query to prevent cancelation. */\r
- hdl = (size_t)InterlockedExchangePointer(\r
- &p_irp->Tail.Overlay.DriverContext[0], AL_INVALID_HANDLE );\r
-\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
- CL_ASSERT( p_sa_req ==\r
- al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ ) );\r
- al_hdl_free( p_context->h_al, hdl );\r
- }\r
-\r
- p_ioctl->out.status = p_sa_req->status;\r
- if( p_mad_response )\r
- {\r
- /* Insert an item to track the MAD until the user fetches it. */\r
- hdl = al_hdl_insert( p_context->h_al,\r
- p_mad_response, AL_OBJ_TYPE_H_MAD );\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
- p_ioctl->out.h_resp = hdl;\r
- p_ioctl->out.resp_size = p_mad_response->size;\r
- }\r
- else\r
- {\r
- p_ioctl->out.h_resp = AL_INVALID_HANDLE;\r
- p_ioctl->out.resp_size = 0;\r
- p_ioctl->out.status = IB_TIMEOUT;\r
- ib_put_mad( p_sa_req->p_mad_response );\r
- }\r
- }\r
- else\r
- {\r
- p_ioctl->out.h_resp = AL_INVALID_HANDLE;\r
- p_ioctl->out.resp_size = 0;\r
- }\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- p_irp->IoStatus.Status = STATUS_SUCCESS;\r
- p_irp->IoStatus.Information = sizeof(p_ioctl->out);\r
- IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
-\r
- /* Release the reference taken when the query was initiated. */\r
- proxy_context_deref( p_context );\r
-\r
- cl_free( p_sa_req );\r
-\r
- AL_EXIT( AL_DBG_QUERY );\r
-}\r
-\r
-\r
-static void\r
-__proxy_cancel_sa_req(\r
- IN DEVICE_OBJECT* p_dev_obj,\r
- IN IRP* p_irp )\r
-{\r
- al_dev_open_context_t *p_context;\r
- PIO_STACK_LOCATION p_io_stack;\r
- uint64_t hdl;\r
- al_sa_req_t *p_sa_req;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- UNUSED_PARAM( p_dev_obj );\r
-\r
- /* Get the stack location. */\r
- p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
- p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;\r
- ASSERT( p_context );\r
-\r
- hdl = (size_t)InterlockedExchangePointer(\r
- &p_irp->Tail.Overlay.DriverContext[0], NULL );\r
- if( hdl != AL_INVALID_HANDLE )\r
- {\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( p_irp, NULL );\r
-#pragma warning(pop)\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- p_sa_req = al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ );\r
- CL_ASSERT( p_sa_req );\r
- al_cancel_sa_req( p_sa_req );\r
- al_hdl_free( p_context->h_al, hdl );\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- }\r
-\r
- IoReleaseCancelSpinLock( p_irp->CancelIrql );\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_send_sa_req(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_send_sa_req_ioctl_t *p_ioctl;\r
- cl_status_t status;\r
- ib_api_status_t ib_status, *p_usr_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- al_dev_open_context_t *p_context;\r
- al_sa_req_t *p_sa_req;\r
- uint64_t hdl, *p_usr_hdl;\r
-\r
- AL_ENTER( AL_DBG_QUERY );\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- p_context = p_open_context;\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- /*\r
- * We support SA requests coming in either through the main file object\r
- * or the async file handle.\r
- */\r
- if( p_io_stack->FileObject->FsContext2 &&\r
- (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Invalid file object type for request: %016I64x\n",\r
- (LONG_PTR)p_io_stack->FileObject->FsContext2) );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Check the size of the ioctl */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid IOCTL buffers.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
- CL_ASSERT( p_ioctl );\r
-\r
- /* Must save user's pointers in case req completes before call returns. */\r
- p_usr_status = p_ioctl->in.p_status;\r
- p_usr_hdl = p_ioctl->in.ph_sa_req;\r
-\r
- if( p_ioctl->in.sa_req.attr_size > IB_SA_DATA_SIZE )\r
- {\r
- ib_status = IB_INVALID_SETTING;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid SA data size: %d\n",\r
- p_ioctl->in.sa_req.attr_size) );\r
- goto proxy_send_sa_req_err1;\r
- }\r
-\r
- p_sa_req = (al_sa_req_t*)cl_zalloc( sizeof(al_sa_req_t) );\r
- if( !p_sa_req )\r
- {\r
- ib_status = IB_INSUFFICIENT_MEMORY;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate SA req.\n") );\r
- goto proxy_send_sa_req_err1;\r
- }\r
-\r
- /* Synchronize with callbacks. */\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
-\r
- /* Track the request. */\r
- hdl = al_hdl_insert( p_context->h_al, p_sa_req, AL_OBJ_TYPE_H_SA_REQ );\r
- if( hdl == AL_INVALID_HANDLE )\r
- {\r
- ib_status = IB_INSUFFICIENT_MEMORY;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to create handle.\n") );\r
- goto proxy_send_sa_req_err2;\r
- }\r
-\r
- /*\r
- * Store the handle in the IRP's driver context so we can cancel it.\r
- * Note that the handle is really a size_t variable, but is cast to a\r
- * uint64_t to provide constant size in mixed 32- and 64-bit environments.\r
- */\r
- h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)hdl;\r
-\r
- /* Format the SA request */\r
- p_sa_req->user_context = h_ioctl;\r
- p_sa_req->pfn_sa_req_cb = __proxy_sa_req_cb;\r
-\r
- p_ioctl->in.sa_req.p_attr = p_ioctl->in.attr;\r
-\r
- /*\r
- * We never pass the user-mode flag when sending SA requests - the\r
- * I/O manager will perform all synchronization to make this IRP sync\r
- * if it needs to.\r
- */\r
- ib_status = al_send_sa_req( p_sa_req, p_ioctl->in.port_guid,\r
- p_ioctl->in.timeout_ms, p_ioctl->in.retry_cnt,\r
- &p_ioctl->in.sa_req, 0 );\r
- if( ib_status == IB_SUCCESS )\r
- {\r
- /* Hold a reference on the proxy context until the request completes. */\r
- proxy_context_ref( p_context );\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, __proxy_cancel_sa_req );\r
-#pragma warning(pop)\r
- IoMarkIrpPending( h_ioctl );\r
-\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- cl_copy_to_user( p_usr_hdl, &hdl, sizeof(hdl) );\r
- status = CL_PENDING;\r
- }\r
- else\r
- {\r
- al_hdl_free( p_context->h_al, hdl );\r
-\r
-proxy_send_sa_req_err2:\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
- cl_free( p_sa_req );\r
-\r
-proxy_send_sa_req_err1:\r
- status = CL_INVALID_PARAMETER;\r
- }\r
-\r
- cl_copy_to_user( p_usr_status, &ib_status, sizeof(ib_api_status_t) );\r
-\r
- AL_EXIT( AL_DBG_QUERY );\r
- return status;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_cancel_sa_req(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cancel_sa_req_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- al_sa_req_t *p_sa_req;\r
-\r
- AL_ENTER( AL_DBG_QUERY );\r
-\r
- UNUSED_PARAM( p_ret_bytes );\r
-\r
- p_context = p_open_context;\r
-\r
- /* Check the size of the ioctl */\r
- if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cancel_sa_req_ioctl_t) ||\r
- cl_ioctl_out_size( h_ioctl ) )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid input buffer.\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = cl_ioctl_in_buf( h_ioctl );\r
- CL_ASSERT( p_ioctl );\r
-\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- p_sa_req =\r
- al_hdl_chk( p_context->h_al, p_ioctl->h_sa_req, AL_OBJ_TYPE_H_SA_REQ );\r
- if( p_sa_req )\r
- al_cancel_sa_req( p_sa_req );\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_QUERY );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-static cl_status_t\r
-proxy_send_mad(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_send_mad_ioctl_t *p_ioctl =\r
- (ual_send_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mad_svc_handle_t h_mad_svc;\r
- ib_pool_key_t pool_key = NULL;\r
- ib_av_handle_t h_av = NULL;\r
- ib_mad_element_t *p_mad_el;\r
- al_mad_element_t *p_al_el;\r
- ib_mad_t *p_mad_buf, *p_usr_buf;\r
- ib_grh_t *p_grh, *p_usr_grh;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate mad svc handle. */\r
- h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );\r
- if( !h_mad_svc )\r
- {\r
- status = IB_INVALID_HANDLE;\r
- goto proxy_send_mad_err1;\r
- }\r
-\r
- /* Validate the pool key */\r
- pool_key = (ib_pool_key_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY );\r
- if( !pool_key )\r
- {\r
- status = IB_INVALID_HANDLE;\r
- goto proxy_send_mad_err1;\r
- }\r
-\r
- /* Validate the AV handle in the mad element if it is not NULL. */\r
- if( p_ioctl->in.h_av )\r
- {\r
- h_av = (ib_av_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );\r
- if( !h_av )\r
- {\r
- status = IB_INVALID_AV_HANDLE;\r
- goto proxy_send_mad_err1;\r
- }\r
- }\r
-\r
- /*\r
- * Get a mad element from kernel MAD pool\r
- * This should not fail since the pool is set to grow\r
- * dynamically\r
- */\r
- status = ib_get_mad( pool_key, p_ioctl->in.size, &p_mad_el );\r
- if( status != IB_SUCCESS )\r
- goto proxy_send_mad_err1;\r
-\r
- /* Store the MAD and GRH buffers pointers. */\r
- p_mad_buf = p_mad_el->p_mad_buf;\r
- p_grh = p_mad_el->p_grh;\r
-\r
- /* Now copy the mad element with all info */\r
- status = ib_convert_cl_status( cl_copy_from_user( p_mad_el,\r
- p_ioctl->in.p_mad_element, sizeof(ib_mad_element_t) ) );\r
- if( status != IB_SUCCESS )\r
- goto proxy_send_mad_err2;\r
-\r
- /* Store the UM pointers. */\r
- p_usr_buf = p_mad_el->p_mad_buf;\r
- p_usr_grh = p_mad_el->p_grh;\r
- /* Restore the MAD and GRH buffer pointers. */\r
- p_mad_el->p_mad_buf = p_mad_buf;\r
- p_mad_el->p_grh = p_grh;\r
- /* Clear the next pointer. */\r
- p_mad_el->p_next = NULL;\r
- /*\r
- * Override the send context so that a response's MAD has a way\r
- * of getting back to the associated send. This is needed because a\r
- * MAD receive completion could fail to be delivered to the app even though\r
- * the response was properly received in the kernel.\r
- */\r
- p_mad_el->context1 = p_ioctl->in.p_mad_element;\r
-\r
- /* Set the kernel AV handle. This is either NULL or a valid KM handle. */\r
- p_mad_el->h_av = h_av;\r
-\r
- /* Copy the GRH, if valid. */\r
- if( p_mad_el->grh_valid )\r
- {\r
- status = ib_convert_cl_status(\r
- cl_copy_from_user( p_grh, p_usr_grh, sizeof(ib_grh_t) ) );\r
- if( status != IB_SUCCESS )\r
- goto proxy_send_mad_err2;\r
- }\r
-\r
- /* Copy the mad payload. */\r
- status = ib_convert_cl_status(\r
- cl_copy_from_user( p_mad_buf, p_usr_buf, p_ioctl->in.size ) );\r
- if( status != IB_SUCCESS )\r
- goto proxy_send_mad_err2;\r
-\r
- /* Copy the handle to UM to allow cancelling. */\r
- status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.ph_proxy, p_mad_el, sizeof(ib_mad_element_t*) ) );\r
- if( status != IB_SUCCESS )\r
- goto proxy_send_mad_err2;\r
-\r
- /*\r
- * Copy the UM element pointer to the kernel's AL element\r
- * for use in completion generation.\r
- */\r
- p_al_el = PARENT_STRUCT( p_mad_el, al_mad_element_t, element );\r
- p_al_el->h_proxy_element = p_ioctl->in.p_mad_element;\r
-\r
- /* Post the element. */\r
- status = ib_send_mad( h_mad_svc, p_mad_el, NULL );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
-proxy_send_mad_err2:\r
- ib_put_mad( p_mad_el );\r
- }\r
-proxy_send_mad_err1:\r
-\r
- if( h_av )\r
- deref_al_obj( &h_av->obj );\r
- if( pool_key )\r
- deref_al_obj( &pool_key->obj );\r
- if( h_mad_svc )\r
- deref_al_obj( &h_mad_svc->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl to retrieve a received MAD.\r
- */\r
-static cl_status_t\r
-proxy_mad_comp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_mad_recv_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- ib_mad_element_t *p_mad;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = (ual_mad_recv_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate the MAD handle and remove it from the handle manager. */\r
- p_mad = al_hdl_get_mad( p_context->h_al, p_ioctl->in.h_mad );\r
- if( !p_mad )\r
- {\r
- status = IB_INVALID_HANDLE;\r
- goto proxy_mad_comp_err1;\r
- }\r
-\r
- /*\r
- * Return the MAD to the user. The user-mode library is responsible\r
- * for correcting all pointers.\r
- */\r
- status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_user_mad, p_mad, sizeof(ib_mad_element_t) ) );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Unable to copy element to user's MAD\n") );\r
- goto proxy_mad_comp_err2;\r
- }\r
-\r
- /* Copy the MAD buffer. */\r
- status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_mad_buf, p_mad->p_mad_buf, p_mad->size ) );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Unable to copy buffer to user's MAD\n") );\r
- goto proxy_mad_comp_err2;\r
- }\r
-\r
- /* Copy the GRH if it is valid. */\r
- if( p_mad->grh_valid )\r
- {\r
- status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_grh, p_mad->p_grh, sizeof(ib_grh_t) ) );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Unable to copy GRH to user's MAD\n") );\r
- goto proxy_mad_comp_err2;\r
- }\r
- }\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- ib_put_mad( p_mad );\r
- }\r
- else\r
- {\r
-proxy_mad_comp_err2:\r
- ib_put_mad( p_mad );\r
-proxy_mad_comp_err1:\r
- cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );\r
- }\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static cl_status_t\r
-proxy_init_dgrm(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- UNUSED_PARAM( p_open_context );\r
- UNUSED_PARAM( h_ioctl );\r
- UNUSED_PARAM( p_ret_bytes );\r
- return CL_ERROR;\r
-}\r
-\r
-\r
-\r
-static void\r
-__proxy_mad_send_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad_element )\r
-{\r
- misc_cb_ioctl_info_t cb_info;\r
- al_dev_open_context_t *p_context;\r
- al_mad_element_t *p_al_el;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- CL_ASSERT( p_mad_element );\r
- CL_ASSERT( !p_mad_element->p_next );\r
- p_context = h_mad_svc->obj.h_al->p_context;\r
- p_al_el = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( proxy_context_ref( p_context ) )\r
- {\r
- /* Set up context and callback record type appropriate for UAL */\r
- cb_info.rec_type = MAD_SEND_REC;\r
- cb_info.ioctl_rec.mad_send_cb_ioctl_rec.wc_status =\r
- p_mad_element->status;\r
- cb_info.ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad =\r
- p_al_el->h_proxy_element;\r
- cb_info.ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context =\r
- mad_svc_context;\r
-\r
- /* Queue this mad completion notification for the user. */\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
- &h_mad_svc->obj );\r
- }\r
-\r
- /* Return the MAD. */\r
- ib_put_mad( p_mad_element );\r
-\r
- proxy_context_deref( p_context );\r
- AL_EXIT( AL_DBG_MAD );\r
-}\r
-\r
-\r
-\r
-static void\r
-__proxy_mad_recv_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad_element )\r
-{\r
- misc_cb_ioctl_info_t cb_info;\r
- al_dev_open_context_t *p_context;\r
- al_mad_element_t *p_al_mad;\r
- uint64_t hdl;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- p_context = h_mad_svc->obj.h_al->p_context;\r
-\r
- p_al_mad = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );\r
-\r
- /* Set up context and callback record type appropriate for UAL */\r
- cb_info.rec_type = MAD_RECV_REC;\r
- cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context;\r
- cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size;\r
- cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad =\r
- (ib_mad_element_t* __ptr64)p_mad_element->send_context1;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- ib_put_mad( p_mad_element );\r
- AL_EXIT( AL_DBG_MAD );\r
- return;\r
- }\r
-\r
- /* Insert an item to track the MAD until the user fetches it. */\r
- cl_spinlock_acquire( &p_context->h_al->obj.lock );\r
- hdl = al_hdl_insert( p_context->h_al, p_mad_element, AL_OBJ_TYPE_H_MAD );\r
- if( hdl == AL_INVALID_HANDLE )\r
- goto proxy_mad_recv_cb_err;\r
-\r
- cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.h_mad = hdl;\r
-\r
- /* Queue this mad completion notification for the user. */\r
- if( !proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
- &h_mad_svc->obj ) )\r
- {\r
- al_hdl_free( p_context->h_al, hdl );\r
-proxy_mad_recv_cb_err:\r
- ib_put_mad( p_mad_element );\r
- }\r
- cl_spinlock_release( &p_context->h_al->obj.lock );\r
-\r
- proxy_context_deref( p_context );\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
-}\r
-\r
-\r
-\r
-static cl_status_t\r
-proxy_reg_mad_svc(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_reg_mad_svc_ioctl_t *p_ioctl =\r
- (ual_reg_mad_svc_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate QP handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;\r
- p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* Now proxy's mad_svc overrides */\r
- p_ioctl->in.mad_svc.pfn_mad_send_cb = __proxy_mad_send_cb;\r
- p_ioctl->in.mad_svc.pfn_mad_recv_cb = __proxy_mad_recv_cb;\r
-\r
- p_ioctl->out.status = reg_mad_svc( h_qp,\r
- &p_ioctl->in.mad_svc, &h_mad_svc );\r
- if( p_ioctl->out.status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_mad_svc = h_mad_svc->obj.hdl;\r
- h_mad_svc->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_mad_svc->obj );\r
- }\r
- else\r
- {\r
- p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE;\r
- }\r
-\r
- deref_al_obj( &h_qp->obj );\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Deregister the MAD service.\r
- */\r
-static cl_status_t\r
-proxy_dereg_mad_svc(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_dereg_mad_svc_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("IOCTL buffer is invalid\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = (ual_dereg_mad_svc_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate MAD service. */\r
- h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );\r
- if( !h_mad_svc )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* Destroy the MAD service. */\r
- h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, ib_sync_destroy );\r
- p_ioctl->out.status = IB_SUCCESS;\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * UAL only uses reg_mad_pool/dereg_mad_pool ioctls\r
- * create/destroy mad pool is implicit in these ioctls\r
- */\r
-static\r
-cl_status_t\r
-proxy_reg_mad_pool(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_reg_mad_pool_ioctl_t *p_ioctl =\r
- (ual_reg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_pool_key_t pool_key;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- p_ioctl->out.status = IB_INVALID_PD_HANDLE;\r
- p_ioctl->out.pool_key = AL_INVALID_HANDLE;\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /*\r
- * If we're in the kernel, we are using the global MAD pool. Other\r
- * MAD pools remain entirely in user-mode.\r
- */\r
-\r
- /* Register the PD with the MAD pool to obtain a pool_key. */\r
- p_ioctl->out.status = reg_mad_pool( gh_mad_pool, h_pd, &pool_key );\r
- if( p_ioctl->out.status == IB_SUCCESS )\r
- {\r
- /* Track the pool info with the process context. */\r
- p_ioctl->out.pool_key = pool_key->obj.hdl;\r
- pool_key->obj.hdl_valid = TRUE;\r
- deref_al_obj( &pool_key->obj );\r
- }\r
- else\r
- {\r
- p_ioctl->out.pool_key = AL_INVALID_HANDLE;\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("reg_mad_pool returned %s.\n",\r
- ib_get_err_str(p_ioctl->out.status)) );\r
- }\r
-\r
- deref_al_obj( &h_pd->obj );\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Deregister the pool_key with the MAD pool. Destroy the MAD pool if we\r
- * created one.\r
- */\r
-static\r
-cl_status_t\r
-proxy_dereg_mad_pool(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_dereg_mad_pool_ioctl_t *p_ioctl =\r
- (ual_dereg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pool_key_t pool_key;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("IOCTL buffer is invalid\n") );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate pool key */\r
- pool_key = (ib_pool_key_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY );\r
- if( !pool_key )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("User-mode provided pool key is invalid\n") );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* We should only have alias pool keys exported to user-mode. */\r
- p_ioctl->out.status = dereg_mad_pool( pool_key, AL_KEY_ALIAS );\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- {\r
- deref_al_obj( &pool_key->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("dereg_mad_pool failed: %s\n",\r
- ib_get_err_str( p_ioctl->out.status )) );\r
- }\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-proxy_cancel_mad(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_cancel_mad_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- p_ioctl = (ual_cancel_mad_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate MAD service handle. */\r
- h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );\r
- if( !h_mad_svc )\r
- {\r
- p_ioctl->out.status = IB_INVALID_HANDLE;\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status =\r
- ib_cancel_mad( h_mad_svc, p_ioctl->in.h_proxy_element );\r
-\r
- /*\r
- * The clean up of resources allocated for the sent mad will\r
- * be handled in the send completion callback\r
- */\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_LOCAL_MAD:\r
- */\r
-static cl_status_t\r
-proxy_local_mad(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_local_mad_ioctl_t *p_ioctl =\r
- (ual_local_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MAD );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- if( ((ib_mad_t*)p_ioctl->in.mad_in)->method != IB_MAD_METHOD_GET )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("invalid method %d\n", ((ib_mad_t*)p_ioctl->in.mad_in)->method) );\r
- status = IB_UNSUPPORTED;\r
- goto proxy_local_mad_err;\r
- }\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- status = IB_INVALID_CA_HANDLE;\r
- goto proxy_local_mad_err;\r
- }\r
- \r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
- \r
- status = ib_local_mad(\r
- h_ca, p_ioctl->in.port_num, p_ioctl->in.mad_in, p_ioctl->out.mad_out );\r
-\r
- deref_al_obj( &h_ca->obj );\r
-\r
-proxy_local_mad_err:\r
- p_ioctl->out.status = status;\r
-\r
- AL_EXIT( AL_DBG_MAD );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-subnet_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- void *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- CL_ASSERT( h_ioctl && p_ret_bytes );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = p_io_stack->FileObject->FsContext;\r
-\r
- if( !p_context )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_REG_SVC:\r
- cl_status = proxy_reg_svc( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_SEND_SA_REQ:\r
- cl_status = proxy_send_sa_req( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CANCEL_SA_REQ:\r
- cl_status = proxy_cancel_sa_req( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MAD_SEND:\r
- cl_status = proxy_send_mad( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_INIT_DGRM_SVC:\r
- cl_status = proxy_init_dgrm( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REG_MAD_SVC:\r
- cl_status = proxy_reg_mad_svc( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REG_MAD_POOL:\r
- cl_status = proxy_reg_mad_pool( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CANCEL_MAD:\r
- cl_status = proxy_cancel_mad( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MAD_RECV_COMP:\r
- cl_status = proxy_mad_comp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DEREG_SVC:\r
- cl_status = proxy_dereg_svc( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DEREG_MAD_SVC:\r
- cl_status = proxy_dereg_mad_svc( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DEREG_MAD_POOL:\r
- cl_status = proxy_dereg_mad_pool( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_LOCAL_MAD:\r
- cl_status = proxy_local_mad( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_proxy_verbs.c 931 2008-01-31 09:20:41Z leonidk $\r
- */\r
-\r
-\r
-#include <complib/comp_lib.h>\r
-#include <iba/ib_al.h>\r
-#include <iba/ib_al_ioctl.h>\r
-#include "al.h"\r
-#include "al_debug.h"\r
-\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_proxy_verbs.tmh"\r
-#endif\r
-\r
-#include "al_dev.h"\r
-/* Get the internal definitions of apis for the proxy */\r
-#include "al_ca.h"\r
-#include "al_pd.h"\r
-#include "al_qp.h"\r
-#include "al_srq.h"\r
-#include "al_cq.h"\r
-#include "al_mr.h"\r
-#include "al_mw.h"\r
-#include "al_av.h"\r
-#include "al_ci_ca.h"\r
-#include "al_mgr.h"\r
-#include "ib_common.h"\r
-#include "al_proxy.h"\r
-\r
-\r
-extern al_mgr_t *gp_al_mgr;\r
-\r
-\r
-/*\r
- *\r
- * Utility function to:\r
- * a. allocate an umv_buf and p_buf in kernel space\r
- * b. copy umv_buf and the contents of p_buf from user-mode\r
- *\r
- * It is assumed that the p_buf does not have any embedded user-mode pointers\r
- */\r
-\r
-ib_api_status_t\r
-cpyin_umvbuf(\r
- IN ci_umv_buf_t *p_src,\r
- OUT ci_umv_buf_t **pp_dst )\r
-{\r
- size_t umv_buf_size;\r
- ci_umv_buf_t *p_dest;\r
-\r
- /* Allocate space for umv_buf */\r
- CL_ASSERT( pp_dst );\r
-\r
- umv_buf_size = sizeof(ci_umv_buf_t);\r
- umv_buf_size += MAX(p_src->input_size, p_src->output_size);\r
-\r
- if( p_src->p_inout_buf )\r
- {\r
- if( p_src->input_size && \r
- cl_check_for_read( p_src->p_inout_buf, (size_t)p_src->input_size )\r
- != CL_SUCCESS )\r
- {\r
- /* user-supplied memory area not readable */\r
- return IB_INVALID_PERMISSION;\r
- }\r
- if( p_src->output_size &&\r
- cl_check_for_write( p_src->p_inout_buf, (size_t)p_src->output_size )\r
- != CL_SUCCESS )\r
- {\r
- /* user-supplied memory area not writeable */\r
- return IB_INVALID_PERMISSION;\r
- }\r
- }\r
- p_dest = (ci_umv_buf_t*)cl_zalloc( (size_t)umv_buf_size );\r
- if( !p_dest )\r
- return IB_INSUFFICIENT_MEMORY;\r
-\r
- /* Copy the umv_buf structure. */\r
- *p_dest = *p_src;\r
- if( p_src->p_inout_buf )\r
- p_dest->p_inout_buf = (void*)(p_dest + 1);\r
-\r
- /* Setup the buffer - either we have an input or output buffer */\r
- if( p_src->input_size )\r
- {\r
- if( cl_copy_from_user( p_dest->p_inout_buf, p_src->p_inout_buf,\r
- (size_t)p_src->input_size ) != CL_SUCCESS )\r
- {\r
- cl_free( p_dest );\r
- return IB_INVALID_PERMISSION;\r
- }\r
- }\r
- *pp_dst = p_dest;\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- *\r
- * Utility function to copy the results of umv_buf and the contents\r
- * of p_buf to umv_buf in user-space.\r
- *\r
- * It is assumed that the p_buf does not have any embedded user-mode pointers\r
- *\r
- * This function can NOT be called from asynchronous callbacks where\r
- * user process context may not be valid\r
- *\r
- */\r
-ib_api_status_t\r
-cpyout_umvbuf(\r
- IN ci_umv_buf_t *p_dest,\r
- IN ci_umv_buf_t *p_src)\r
-{\r
- CL_ASSERT( p_dest );\r
-\r
- if( p_src )\r
- {\r
- CL_ASSERT( p_dest->command == p_src->command );\r
- CL_ASSERT( p_dest->input_size == p_src->input_size );\r
- /* Copy output buf only on success. */\r
- if( p_src->status == IB_SUCCESS )\r
- {\r
- uint32_t out_size;\r
-\r
- out_size = MIN( p_dest->output_size, p_src->output_size );\r
-\r
- if( cl_copy_to_user( p_dest->p_inout_buf, p_src->p_inout_buf,\r
- out_size ) != CL_SUCCESS )\r
- {\r
- p_dest->output_size = 0;\r
- return IB_INVALID_PERMISSION;\r
- }\r
- p_dest->status = p_src->status;\r
- p_dest->output_size = out_size;\r
- }\r
- }\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-void\r
-free_umvbuf(\r
- IN ci_umv_buf_t *p_umv_buf )\r
-{\r
- if( p_umv_buf )\r
- cl_free( p_umv_buf );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_GET_VENDOR_LIBCFG:\r
- */\r
-static cl_status_t\r
-proxy_get_vendor_libcfg(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_get_uvp_name_ioctl_t *p_ioctl =\r
- (ual_get_uvp_name_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
- al_ci_ca_t *p_ci_ca;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- UNUSED_PARAM( p_open_context );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Find the CAguid */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CA,\r
- ("CA guid %I64x.\n", p_ioctl->in.ca_guid) );\r
-\r
- cl_spinlock_acquire( &gp_al_mgr->obj.lock );\r
- p_ci_ca = find_ci_ca( p_ioctl->in.ca_guid );\r
-\r
- if( !p_ci_ca )\r
- {\r
- cl_spinlock_release( &gp_al_mgr->obj.lock );\r
- p_ioctl->out.status = IB_NOT_FOUND;\r
- }\r
- else\r
- {\r
- /* found the ca guid, copy the user-mode verbs provider libname */\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CA,\r
- ("CA guid %I64x. libname (%s)\n",\r
- p_ioctl->in.ca_guid, p_ci_ca->verbs.libname) );\r
- cl_memcpy( p_ioctl->out.uvp_lib_name, p_ci_ca->verbs.libname,\r
- sizeof(p_ci_ca->verbs.libname));\r
- cl_spinlock_release( &gp_al_mgr->obj.lock );\r
- p_ioctl->out.status = IB_SUCCESS;\r
- }\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Allocate an ioctl buffer of appropriate size\r
- * Copy the given ioctl buffer\r
- * Queue the ioctl buffer as needed\r
- */\r
-boolean_t\r
-proxy_queue_cb_buf(\r
- IN uintn_t cb_type,\r
- IN al_dev_open_context_t *p_context,\r
- IN void *p_cb_data,\r
- IN al_obj_t *p_al_obj OPTIONAL )\r
-{\r
- cl_qlist_t *p_cb_list;\r
- al_proxy_cb_info_t *p_cb_info;\r
- cl_ioctl_handle_t *ph_ioctl, h_ioctl;\r
- uintn_t ioctl_size;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
- \r
- /* Set up the appropriate callback list. */\r
- switch( cb_type )\r
- {\r
- case UAL_GET_CM_CB_INFO:\r
- p_cb_list = &p_context->cm_cb_list;\r
- ph_ioctl = &p_context->h_cm_ioctl;\r
- ioctl_size = sizeof( cm_cb_ioctl_info_t );\r
- break;\r
-\r
- case UAL_GET_COMP_CB_INFO:\r
- p_cb_list = &p_context->comp_cb_list;\r
- ph_ioctl = &p_context->h_comp_ioctl;\r
- ioctl_size = sizeof( comp_cb_ioctl_info_t );\r
- break;\r
-\r
- case UAL_GET_MISC_CB_INFO:\r
- p_cb_list = &p_context->misc_cb_list;\r
- ph_ioctl = &p_context->h_misc_ioctl;\r
- ioctl_size = sizeof( misc_cb_ioctl_info_t );\r
- break;\r
-\r
- default:\r
- return FALSE;\r
- }\r
-\r
- /* Get a callback record to queue the callback. */\r
- p_cb_info = proxy_cb_get( p_context );\r
- if( !p_cb_info )\r
- return FALSE;\r
-\r
- cl_memcpy( &p_cb_info->cb_type, p_cb_data, ioctl_size );\r
-\r
- /*\r
- * If an AL object was specified, we need to reference it to prevent its\r
- * destruction until the callback has been fully specified.\r
- */\r
- if( p_al_obj )\r
- {\r
- p_cb_info->p_al_obj = p_al_obj;\r
- ref_al_obj( p_al_obj );\r
- }\r
-\r
- /* Insert the callback record into the callback list */\r
- cl_spinlock_acquire( &p_context->cb_lock );\r
- cl_qlist_insert_tail( p_cb_list, &p_cb_info->pool_item.list_item );\r
-\r
- /* See if there is a pending IOCTL ready to receive the callback. */\r
- if( *ph_ioctl )\r
- {\r
- h_ioctl = *ph_ioctl;\r
- *ph_ioctl = NULL;\r
-#pragma warning(push, 3)\r
- IoSetCancelRoutine( h_ioctl, NULL );\r
-#pragma warning(pop)\r
-\r
- p_cb_info->reported = TRUE;\r
-\r
- /* Complete the IOCTL to return the callback information. */\r
- CL_ASSERT( cl_ioctl_out_size( h_ioctl ) >= ioctl_size );\r
- cl_memcpy( cl_ioctl_out_buf( h_ioctl ), p_cb_data, ioctl_size );\r
- cl_ioctl_complete( h_ioctl, CL_SUCCESS, ioctl_size );\r
- proxy_context_deref( p_context );\r
- }\r
- cl_spinlock_release( &p_context->cb_lock );\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return TRUE;\r
-}\r
-\r
-\r
-/*\r
- * Proxy's ca error callback\r
- * The context in the error record is proxy's ca context\r
- * Context is the a list object in the CA list\r
- */\r
-static void\r
-proxy_ca_err_cb(\r
- IN ib_async_event_rec_t *p_err_rec)\r
-{\r
- ib_ca_handle_t h_ca = p_err_rec->handle.h_ca;\r
- al_dev_open_context_t *p_context = h_ca->obj.h_al->p_context;\r
- misc_cb_ioctl_info_t cb_info;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- proxy_context_deref( p_context );\r
- return;\r
- }\r
-\r
- /* Set up context and callback record type appropriate for UAL */\r
- cb_info.rec_type = CA_ERROR_REC;\r
- /* Return the Proxy's open_ca handle and the user's context */\r
- cb_info.ioctl_rec.event_rec = *p_err_rec;\r
- cb_info.ioctl_rec.event_rec.handle.h_ca = (ib_ca_handle_t)h_ca->obj.hdl;\r
-\r
- /* The proxy handle must be valid now. */\r
- if( !h_ca->obj.hdl_valid )\r
- h_ca->obj.hdl_valid = TRUE;\r
-\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
- &h_ca->obj );\r
-\r
- proxy_context_deref( p_context );\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_OPEN_CA:\r
- *\r
- * Returns the ca_list_obj as the handle to UAL\r
- */\r
-static cl_status_t\r
-proxy_open_ca(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_open_ca_ioctl_t *p_ioctl =\r
- (ual_open_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases. */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_open_ca_err;\r
-\r
- status = open_ca( p_context->h_al, p_ioctl->in.guid, proxy_ca_err_cb,\r
- p_ioctl->in.context, &h_ca, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_open_ca_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_ca = h_ca->obj.hdl;\r
- h_ca->obj.hdl_valid = TRUE;\r
- /* Release the reference taken in init_al_obj */\r
- deref_al_obj( &h_ca->obj );\r
- }\r
- else\r
- {\r
- h_ca->obj.pfn_destroy( &h_ca->obj, NULL );\r
-\r
-proxy_open_ca_err: /* getting a handle failed. */\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.h_ca = AL_INVALID_HANDLE;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- p_ioctl->out.status = status;\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_QUERY_CA:\r
- */\r
-static cl_status_t\r
-proxy_query_ca(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_query_ca_ioctl_t *p_ioctl =\r
- (ual_query_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
- ib_ca_attr_t *p_ca_attr = NULL;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
- uint32_t byte_cnt = 0;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- status = IB_INVALID_CA_HANDLE;\r
- goto proxy_query_ca_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_ca_err;\r
-\r
- byte_cnt = p_ioctl->in.byte_cnt;\r
- if( p_ioctl->in.p_ca_attr && byte_cnt )\r
- {\r
- p_ca_attr = (ib_ca_attr_t*)cl_zalloc( byte_cnt );\r
- if( !p_ca_attr )\r
- {\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto proxy_query_ca_err;\r
- }\r
- }\r
- status = query_ca( h_ca, p_ca_attr, &byte_cnt, p_umv_buf );\r
-\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_ca_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
-\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_ca_err;\r
-\r
- /* copy CA attribute back to user */\r
- if( p_ca_attr )\r
- {\r
- __try\r
- {\r
- ProbeForWrite( p_ioctl->in.p_ca_attr, byte_cnt, sizeof(void*) );\r
- ib_copy_ca_attr( p_ioctl->in.p_ca_attr, p_ca_attr );\r
- }\r
- __except(EXCEPTION_EXECUTE_HANDLER)\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to copy CA attributes to user buffer %016I64x\n",\r
- (LONG64)p_ioctl->in.p_ca_attr) );\r
- status = IB_INVALID_PERMISSION;\r
- }\r
- }\r
-\r
- /* Free the ca_attr buffer allocated locally */\r
- if( status != IB_SUCCESS )\r
- {\r
-proxy_query_ca_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- }\r
- if( p_ca_attr )\r
- cl_free( p_ca_attr );\r
-\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_ca )\r
- deref_al_obj( &h_ca->obj );\r
-\r
- p_ioctl->out.status = status;\r
- p_ioctl->out.byte_cnt = byte_cnt;\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_MODIFY_CA:\r
- */\r
-static\r
-cl_status_t\r
-proxy_modify_ca(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_modify_ca_ioctl_t *p_ioctl =\r
- (ual_modify_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CA_HANDLE;\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status = ib_modify_ca( h_ca, p_ioctl->in.port_num,\r
- p_ioctl->in.ca_mod, &p_ioctl->in.port_attr_mod );\r
-\r
- deref_al_obj( &h_ca->obj );\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_CLOSE_CA:\r
- */\r
-static\r
-cl_status_t\r
-proxy_close_ca(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_close_ca_ioctl_t *p_ioctl =\r
- (ual_close_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CA_HANDLE;\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /*\r
- * Note that we hold a reference on the CA, so we need to\r
- * call close_ca, not ib_close_ca. We also don't release the reference\r
- * since close_ca will do so (by destroying the object).\r
- */\r
- h_ca->obj.pfn_destroy( &h_ca->obj, ib_sync_destroy );\r
- p_ioctl->out.status = IB_SUCCESS;\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Validates the proxy handles and converts them to AL handles\r
- */\r
-static ib_api_status_t\r
-__convert_to_al_handles(\r
- IN al_dev_open_context_t* const p_context,\r
- IN uint64_t* const um_handle_array,\r
- IN uint32_t num_handles,\r
- OUT void* __ptr64 * const p_handle_array )\r
-{\r
- uint32_t i;\r
-\r
- for( i = 0; i < num_handles; i++ )\r
- {\r
- /* Validate the handle in the resource map */\r
- p_handle_array[i] = al_hdl_ref(\r
- p_context->h_al, um_handle_array[i], AL_OBJ_TYPE_UNKNOWN );\r
- if( !p_handle_array[i] )\r
- {\r
- /* Release references taken so far. */\r
- while( i-- )\r
- deref_al_obj( p_handle_array[i] );\r
-\r
- /* Could not find the handle in the map */\r
- return IB_INVALID_HANDLE;\r
- }\r
- }\r
-\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static\r
-cl_status_t\r
-proxy_ci_call(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_ci_call_ioctl_t *p_ioctl =\r
- (ual_ci_call_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- void* p_ci_op_buf = NULL;\r
- void* p_ci_op_user_buf = NULL;\r
- void* __ptr64 * p_handle_array = NULL;\r
- size_t ci_op_buf_size;\r
- ib_api_status_t status;\r
- uint32_t num_handles;\r
-\r
- AL_ENTER( AL_DBG_CA );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- num_handles = p_ioctl->in.num_handles;\r
- if( num_handles > 1 &&\r
- cl_ioctl_in_size( h_ioctl ) != (sizeof(uint64_t) * (num_handles - 1)) )\r
- {\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- ci_op_buf_size = (size_t)p_ioctl->in.ci_op.buf_size;\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CA_HANDLE;\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /* Save the user buffer address */\r
- p_ci_op_user_buf = p_ioctl->in.ci_op.p_buf;\r
-\r
- /* Validate the handle array */\r
- if( num_handles )\r
- {\r
- p_handle_array = cl_malloc( sizeof(void* __ptr64) * num_handles );\r
- if( !p_handle_array )\r
- {\r
- p_ioctl->out.status = IB_INSUFFICIENT_MEMORY;\r
- deref_al_obj( &h_ca->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate handle array.\n") );\r
- return CL_SUCCESS;\r
- }\r
-\r
- /*\r
- * Now we have the handle array in kernel space. Replace\r
- * the handles with the correct AL handles based on the\r
- * type\r
- */\r
- status = __convert_to_al_handles( p_context, p_ioctl->in.handle_array,\r
- num_handles, p_handle_array );\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_free( p_handle_array );\r
- p_ioctl->out.status = status;\r
- deref_al_obj( &h_ca->obj );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to convert handles.\n") );\r
- return CL_SUCCESS;\r
- }\r
- }\r
-\r
- /* Copy in the UMV buffer */\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_ci_call_err;\r
-\r
- if( p_ioctl->in.ci_op.buf_size && p_ioctl->in.ci_op.p_buf )\r
- {\r
- p_ci_op_buf = cl_zalloc( ci_op_buf_size );\r
- if( !p_ci_op_buf )\r
- {\r
- status = IB_INSUFFICIENT_MEMORY;\r
- goto proxy_ci_call_err;\r
- }\r
-\r
- /* Copy from user the buffer */\r
- if( cl_copy_from_user( p_ci_op_buf, p_ioctl->in.ci_op.p_buf,\r
- ci_op_buf_size ) != CL_SUCCESS )\r
- {\r
- status = IB_INVALID_PERMISSION;\r
- goto proxy_ci_call_err;\r
- }\r
- /* Update the buffer pointer to reference the kernel copy. */\r
- p_ioctl->in.ci_op.p_buf = p_ci_op_buf;\r
- }\r
-\r
- status = ci_call( h_ca, p_handle_array,\r
- num_handles, &p_ioctl->in.ci_op, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_ci_call_err;\r
-\r
- /* Copy the umv_buf back to user space */\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- status = IB_INVALID_PERMISSION;\r
- goto proxy_ci_call_err;\r
- }\r
-\r
- /*\r
- * Copy the data buffer. Copy the buf size so that if the\r
- * num_bytes_ret is greater than the buffer size, we copy\r
- * only what the buffer can hold\r
- */\r
- if( cl_copy_to_user( p_ci_op_user_buf, p_ioctl->in.ci_op.p_buf,\r
- ci_op_buf_size ) != CL_SUCCESS )\r
- {\r
- status = IB_INVALID_PERMISSION;\r
- }\r
-\r
-proxy_ci_call_err:\r
-\r
- /* Restore the data buffer */\r
- p_ioctl->out.ci_op.p_buf = p_ci_op_user_buf;\r
- p_ioctl->out.status = status;\r
-\r
- /* Release the resources allocated */\r
- if( p_handle_array )\r
- {\r
- while( num_handles-- )\r
- deref_al_obj( (al_obj_t* __ptr64)p_handle_array[num_handles] );\r
- cl_free( p_handle_array );\r
- }\r
- if( p_ci_op_buf )\r
- cl_free( p_ci_op_buf );\r
-\r
- free_umvbuf( p_umv_buf );\r
-\r
- deref_al_obj( &h_ca->obj );\r
-\r
- AL_EXIT( AL_DBG_CA );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*
- * Process the ioctl UAL_ALLOC_PD:
- *
- * Allocates a protection domain on the CA named by the user-mode CA
- * handle and returns the new PD's AL object handle (obj.hdl) to UAL.
- */
-static
-cl_status_t
-proxy_alloc_pd(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_alloc_pd_ioctl_t *p_ioctl =
- (ual_alloc_pd_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_ca_handle_t h_ca;
- ib_pd_handle_t h_pd;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_PD );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_PD );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate CA handle; al_hdl_ref takes a reference that is released below. */
- h_ca = (ib_ca_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );
- if( !h_ca )
- {
- status = IB_INVALID_CA_HANDLE;
- goto proxy_alloc_pd_err;
- }
-
- /* Copy the user-mode verbs buffer into kernel space. */
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_alloc_pd_err;
-
- status = alloc_pd( h_ca, p_ioctl->in.type, p_ioctl->in.context,
- &h_pd, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_alloc_pd_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.h_pd = h_pd->obj.hdl;
- h_pd->obj.hdl_valid = TRUE;
- /* Release the reference taken by alloc_pd on creation. */
- deref_al_obj( &h_pd->obj );
- }
- else
- {
- /* Copy-out failed: tear the new PD down again. */
- h_pd->obj.pfn_destroy( &h_pd->obj, NULL );
-
-/* Note: this label sits inside the else clause so all early-error paths
- * share the failure bookkeeping below. */
-proxy_alloc_pd_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.h_pd = AL_INVALID_HANDLE;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_ca )
- deref_al_obj( &h_ca->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_PD );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Process the ioctl UAL_DEALLOC_PD:
- *
- * Synchronously destroys the PD named by the user-mode handle.
- */
-static cl_status_t
-proxy_dealloc_pd(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_dealloc_pd_ioctl_t *p_ioctl =
- (ual_dealloc_pd_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_pd_handle_t h_pd;
-
- AL_ENTER( AL_DBG_PD );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_PD );
- return CL_INVALID_PARAMETER;
- }
-
- /* Set the return bytes in all cases */
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- /* Validate PD handle */
- h_pd = (ib_pd_handle_t)al_hdl_ref(
- p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
- if( !h_pd )
- {
- p_ioctl->out.status = IB_INVALID_PD_HANDLE;
- AL_EXIT( AL_DBG_PD );
- return CL_SUCCESS;
- }
-
- /* NOTE(review): no explicit deref here — pfn_destroy presumably consumes
- * the reference taken by al_hdl_ref; same pattern as the other destroy
- * handlers in this file. */
- h_pd->obj.pfn_destroy( &h_pd->obj, ib_sync_destroy );
- p_ioctl->out.status = IB_SUCCESS;
-
- AL_EXIT( AL_DBG_PD );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Proxy's SRQ error handler
- *
- * Kernel-side async-event callback for a user-mode-owned SRQ: packages the
- * event record (with the kernel SRQ handle replaced by the proxy handle)
- * and queues it so UAL can retrieve it via UAL_GET_MISC_CB_INFO.
- */
-static void
-proxy_srq_err_cb(
- IN ib_async_event_rec_t *p_err_rec )
-{
- ib_srq_handle_t h_srq = p_err_rec->handle.h_srq;
- al_dev_open_context_t *p_context = h_srq->obj.h_al->p_context;
- misc_cb_ioctl_info_t cb_info;
-
- AL_ENTER( AL_DBG_QP );
-
- /*
- * If we're already closing the device - do not queue a callback, since
- * we're cleaning up the callback lists.
- */
- if( !proxy_context_ref( p_context ) )
- {
- proxy_context_deref( p_context );
- return;
- }
-
- /* Set up context and callback record type appropriate for UAL */
- cb_info.rec_type = SRQ_ERROR_REC;
- /* Return the Proxy's SRQ handle and the user's context */
- cb_info.ioctl_rec.event_rec = *p_err_rec;
- cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t)h_srq->obj.hdl;
-
- /* The proxy handle must be valid now. */
- if( !h_srq->obj.hdl_valid )
- h_srq->obj.hdl_valid = TRUE;
-
- proxy_queue_cb_buf(
- UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_srq->obj );
-
- proxy_context_deref( p_context );
-
- AL_EXIT( AL_DBG_QP );
-}
-\r
-/*
- * Process the ioctl UAL_CREATE_SRQ
- *
- * Creates an SRQ on the PD named by the user-mode handle and returns the
- * new SRQ's AL object handle (obj.hdl) to UAL.
- */
-static cl_status_t
-proxy_create_srq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_create_srq_ioctl_t *p_ioctl =
- (ual_create_srq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_pd_handle_t h_pd;
- ib_srq_handle_t h_srq;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
- ib_pfn_event_cb_t pfn_ev;
-
- AL_ENTER( AL_DBG_SRQ );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_SRQ );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate handles. */
- h_pd = (ib_pd_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
- if( !h_pd)
- {
- status = IB_INVALID_PD_HANDLE;
- goto proxy_create_srq_err1;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_create_srq_err1;
-
- /* Only hook the kernel-side error callback if the user asked for
- * event notification. */
- if( p_ioctl->in.ev_notify )
- pfn_ev = proxy_srq_err_cb;
- else
- pfn_ev = NULL;
-
- status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context,
- pfn_ev, &h_srq, p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_create_srq_err1;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.h_srq = h_srq->obj.hdl;
- h_srq->obj.hdl_valid = TRUE;
- /* Release the reference taken in create_srq (by init_al_obj) */
- deref_al_obj( &h_srq->obj );
- }
- else
- {
-/* Error label inside the else clause so early failures share this path. */
-proxy_create_srq_err1:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.h_srq = AL_INVALID_HANDLE;
- }
- free_umvbuf( p_umv_buf );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- if( h_pd )
- deref_al_obj( &h_pd->obj );
-
- AL_EXIT( AL_DBG_SRQ );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Process the ioctl UAL_QUERY_SRQ:
- *
- * Returns the SRQ attributes to UAL via p_ioctl->out.srq_attr.
- */
-static
-cl_status_t
-proxy_query_srq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_query_srq_ioctl_t *p_ioctl =
- (ual_query_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_srq_handle_t h_srq;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_SRQ );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_SRQ );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate SRQ handle */
- h_srq = (ib_srq_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
- if( !h_srq )
- {
- status = IB_INVALID_SRQ_HANDLE;
- goto proxy_query_srq_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_query_srq_err;
-
- status = query_srq( h_srq, &p_ioctl->out.srq_attr, p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_query_srq_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status != IB_SUCCESS )
- {
-/* On any failure, echo back the input umv_buf and zero the attributes. */
-proxy_query_srq_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- cl_memclr( &p_ioctl->out.srq_attr, sizeof(ib_srq_attr_t) );
- }
- free_umvbuf( p_umv_buf );
-
- if( h_srq )
- deref_al_obj( &h_srq->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_SRQ );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_MODIFY_SRQ:
- *
- * Applies the attribute changes selected by srq_attr_mask to the SRQ.
- */
-static
-cl_status_t
-proxy_modify_srq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_modify_srq_ioctl_t *p_ioctl =
- (ual_modify_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_srq_handle_t h_srq;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_SRQ );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_SRQ );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate SRQ handle */
- h_srq = (ib_srq_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
- if( !h_srq )
- {
- status = IB_INVALID_SRQ_HANDLE;
- goto proxy_modify_srq_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_modify_srq_err;
-
- status = modify_srq( h_srq, &p_ioctl->in.srq_attr, p_ioctl->in.srq_attr_mask, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_modify_srq_err;
- 
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status != IB_SUCCESS )
- {
-/* On any failure, echo back the input umv_buf unchanged. */
-proxy_modify_srq_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_srq )
- deref_al_obj( &h_srq->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_SRQ );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Process the ioctl UAL_DESTROY_SRQ
- *
- * Synchronously destroys the SRQ named by the user-mode handle.
- */
-static cl_status_t
-proxy_destroy_srq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_destroy_srq_ioctl_t *p_ioctl =
- (ual_destroy_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_srq_handle_t h_srq;
-
- AL_ENTER( AL_DBG_SRQ );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_SRQ );
- return CL_INVALID_PARAMETER;
- }
-
- /* Set the return bytes in all cases */
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- /* Validate SRQ handle */
- h_srq = (ib_srq_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ );
- if( !h_srq )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") );
- p_ioctl->out.status = IB_INVALID_SRQ_HANDLE;
- }
- else
- {
- /* NOTE(review): destroy presumably consumes the al_hdl_ref reference;
- * matches the other destroy handlers in this file. */
- h_srq->obj.pfn_destroy( &h_srq->obj, ib_sync_destroy );
- p_ioctl->out.status = IB_SUCCESS;
- }
-
- AL_EXIT( AL_DBG_SRQ );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Proxy's QP error handler
- *
- * Kernel-side async-event callback for a user-mode-owned QP: packages the
- * event record (with the kernel QP handle replaced by the proxy handle)
- * and queues it so UAL can retrieve it via UAL_GET_MISC_CB_INFO.
- */
-static void
-proxy_qp_err_cb(
- IN ib_async_event_rec_t *p_err_rec )
-{
- ib_qp_handle_t h_qp = p_err_rec->handle.h_qp;
- al_dev_open_context_t *p_context = h_qp->obj.h_al->p_context;
- misc_cb_ioctl_info_t cb_info;
-
- AL_ENTER( AL_DBG_QP );
-
- /*
- * If we're already closing the device - do not queue a callback, since
- * we're cleaning up the callback lists.
- */
- if( !proxy_context_ref( p_context ) )
- {
- proxy_context_deref( p_context );
- return;
- }
-
- /* Set up context and callback record type appropriate for UAL */
- cb_info.rec_type = QP_ERROR_REC;
- /* Return the Proxy's QP handle and the user's context */
- cb_info.ioctl_rec.event_rec = *p_err_rec;
- cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t)h_qp->obj.hdl;
-
- /* The proxy handle must be valid now. */
- if( !h_qp->obj.hdl_valid )
- h_qp->obj.hdl_valid = TRUE;
-
- proxy_queue_cb_buf(
- UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_qp->obj );
-
- proxy_context_deref( p_context );
-
- AL_EXIT( AL_DBG_QP );
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_CREATE_QP
- *
- * Creates a QP on the PD/CQs (and optional SRQ) named by the user-mode
- * handles, queries its initial attributes, and returns the new QP's AL
- * object handle (obj.hdl) to UAL.
- */
-static cl_status_t
-proxy_create_qp(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_create_qp_ioctl_t *p_ioctl =
- (ual_create_qp_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_pd_handle_t h_pd;
- ib_qp_handle_t h_qp;
- ib_srq_handle_t h_srq = NULL;
- ib_cq_handle_t h_sq_cq, h_rq_cq;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
- ib_pfn_event_cb_t pfn_ev;
-
- AL_ENTER( AL_DBG_QP );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_QP );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate handles.
- * All lookups are performed before any NULL check; the cleanup code at
- * the bottom derefs whichever lookups succeeded. */
- h_pd = (ib_pd_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
- h_sq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,
- (uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ );
- h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,
- (uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ );
- if (p_ioctl->in.qp_create.h_srq) {
- h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al,
- (uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ );
- if( !h_srq)
- {
- status = IB_INVALID_SRQ_HANDLE;
- goto proxy_create_qp_err1;
- }
- }
- if( !h_pd)
- {
- status = IB_INVALID_PD_HANDLE;
- goto proxy_create_qp_err1;
- }
- if( !h_sq_cq )
- {
- status = IB_INVALID_CQ_HANDLE;
- goto proxy_create_qp_err1;
- }
- if( !h_rq_cq )
- {
- status = IB_INVALID_CQ_HANDLE;
- goto proxy_create_qp_err1;
- }
-
- /* Substitute rq_cq handle with AL's cq handle */
- p_ioctl->in.qp_create.h_sq_cq = h_sq_cq;
- /* Substitute rq_cq handle with AL's cq handle */
- p_ioctl->in.qp_create.h_rq_cq = h_rq_cq;
- /* Substitute srq handle with AL's srq handle */
- p_ioctl->in.qp_create.h_srq = h_srq;
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_create_qp_err1;
-
- /* Only hook the kernel-side error callback if the user asked for
- * event notification. */
- if( p_ioctl->in.ev_notify )
- pfn_ev = proxy_qp_err_cb;
- else
- pfn_ev = NULL;
-
- status = create_qp( h_pd, &p_ioctl->in.qp_create, p_ioctl->in.context,
- pfn_ev, &h_qp, p_umv_buf );
- /* TODO: The create_qp call should return the attributes... */
- if( status != IB_SUCCESS )
- goto proxy_create_qp_err1;
-
- status = query_qp( h_qp, &p_ioctl->out.attr, NULL );
- if( status != IB_SUCCESS )
- goto proxy_create_qp_err2;
- 
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.h_qp = h_qp->obj.hdl;
- h_qp->obj.hdl_valid = TRUE;
- /* Release the reference taken in create_qp (by init_al_obj) */
- deref_al_obj( &h_qp->obj );
- }
- else
- {
-/* err2: QP exists but query/copy-out failed — destroy it. */
-proxy_create_qp_err2:
- /*
- * Note that we hold the reference taken in create_qp (by init_al_obj)
- */
- h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
-
-/* err1: failures before the QP existed — just report the error. */
-proxy_create_qp_err1:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.h_qp = AL_INVALID_HANDLE;
- cl_memclr( &p_ioctl->out.attr, sizeof(ib_qp_attr_t) );
- }
- free_umvbuf( p_umv_buf );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- /* Release whichever handle lookups succeeded above. */
- if( h_pd )
- deref_al_obj( &h_pd->obj );
- if( h_rq_cq )
- deref_al_obj( &h_rq_cq->obj );
- if( h_sq_cq )
- deref_al_obj( &h_sq_cq->obj );
- if( h_srq )
- deref_al_obj( &h_srq->obj );
-
- AL_EXIT( AL_DBG_QP );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Process the ioctl UAL_QUERY_QP:
- *
- * Returns the QP attributes to UAL. On success the kernel object pointers
- * embedded in the attributes (PD, CQs, SRQ) are replaced by their proxy
- * handles (obj.hdl) so user mode never sees kernel pointers.
- */
-static
-cl_status_t
-proxy_query_qp(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_query_qp_ioctl_t *p_ioctl =
- (ual_query_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_qp_handle_t h_qp;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_QP );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_QP );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- status = IB_INVALID_QP_HANDLE;
- goto proxy_query_qp_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_query_qp_err;
-
- status = query_qp( h_qp, &p_ioctl->out.attr, p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_query_qp_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- /* Swap each embedded kernel object pointer for its proxy handle. */
- if( p_ioctl->out.attr.h_pd )
- {
- p_ioctl->out.attr.h_pd =
- (ib_pd_handle_t)p_ioctl->out.attr.h_pd->obj.hdl;
- }
- else
- {
- p_ioctl->out.attr.h_pd = NULL;
- }
- if( p_ioctl->out.attr.h_sq_cq )
- {
- p_ioctl->out.attr.h_sq_cq =
- (ib_cq_handle_t)p_ioctl->out.attr.h_sq_cq->obj.hdl;
- }
- else
- {
- p_ioctl->out.attr.h_sq_cq = NULL;
- }
- if( p_ioctl->out.attr.h_rq_cq )
- {
- p_ioctl->out.attr.h_rq_cq =
- (ib_cq_handle_t)p_ioctl->out.attr.h_rq_cq->obj.hdl;
- }
- else
- {
- p_ioctl->out.attr.h_rq_cq = NULL;
- }
- if( p_ioctl->out.attr.h_srq )
- {
- p_ioctl->out.attr.h_srq =
- (ib_srq_handle_t)p_ioctl->out.attr.h_srq->obj.hdl;
- }
- else
- {
- p_ioctl->out.attr.h_srq = NULL;
- }
- }
- else
- {
-/* On any failure, echo back the input umv_buf and zero the attributes. */
-proxy_query_qp_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- cl_memclr( &p_ioctl->out.attr, sizeof(ib_qp_attr_t) );
- }
- free_umvbuf( p_umv_buf );
-
- if( h_qp )
- deref_al_obj( &h_qp->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_QP );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_MODIFY_QP:
- *
- * Applies the requested QP attribute/state transition.
- */
-static
-cl_status_t
-proxy_modify_qp(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_modify_qp_ioctl_t *p_ioctl =
- (ual_modify_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_qp_handle_t h_qp;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_QP );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_QP );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- status = IB_INVALID_QP_HANDLE;
- goto proxy_modify_qp_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_modify_qp_err;
-
- status = modify_qp( h_qp, &p_ioctl->in.modify_attr, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_modify_qp_err;
- 
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status != IB_SUCCESS )
- {
-/* On any failure, echo back the input umv_buf unchanged. */
-proxy_modify_qp_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_qp )
- deref_al_obj( &h_qp->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_QP );
- return CL_SUCCESS;
-}
-\r
-\r
-/*
- * Process the ioctl UAL_DESTROY_QP
- *
- * Synchronously destroys the QP named by the user-mode handle.
- */
-static cl_status_t
-proxy_destroy_qp(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_destroy_qp_ioctl_t *p_ioctl =
- (ual_destroy_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_qp_handle_t h_qp;
-
- AL_ENTER( AL_DBG_QP );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_QP );
- return CL_INVALID_PARAMETER;
- }
-
- /* Set the return bytes in all cases */
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- /* Validate QP handle */
- h_qp = (ib_qp_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );
- if( !h_qp )
- {
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
- p_ioctl->out.status = IB_INVALID_QP_HANDLE;
- }
- else
- {
- /* NOTE(review): destroy presumably consumes the al_hdl_ref reference;
- * matches the other destroy handlers in this file. */
- h_qp->obj.pfn_destroy( &h_qp->obj, ib_sync_destroy );
- p_ioctl->out.status = IB_SUCCESS;
- }
-
- AL_EXIT( AL_DBG_QP );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_CREATE_AV:
- *
- * Creates an address vector on the PD named by the user-mode handle and
- * returns the new AV's AL object handle (obj.hdl) to UAL.
- */
-static
-cl_status_t
-proxy_create_av(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_create_av_ioctl_t *p_ioctl =
- (ual_create_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_pd_handle_t h_pd;
- ib_av_handle_t h_av;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_AV );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_AV );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate PD handle */
- h_pd = (ib_pd_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
- if( !h_pd )
- {
- status = IB_INVALID_PD_HANDLE;
- goto proxy_create_av_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_create_av_err;
-
- status = create_av( h_pd, &p_ioctl->in.attr, &h_av, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_create_av_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.h_av = h_av->obj.hdl;
- h_av->obj.hdl_valid = TRUE;
- /* Release the reference taken in create_av. */
- deref_al_obj( &h_av->obj );
- }
- else
- {
- /* Copy-out failed: tear the new AV down again. */
- h_av->obj.pfn_destroy( &h_av->obj, NULL );
-
-/* Error label inside the else clause so early failures share this path. */
-proxy_create_av_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.h_av = AL_INVALID_HANDLE;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_pd )
- deref_al_obj( &h_pd->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
- 
- AL_EXIT( AL_DBG_AV );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_QUERY_AV:
- *
- * Returns the AV attributes plus the user context of the PD the AV was
- * created on.
- */
-static
-cl_status_t
-proxy_query_av(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_query_av_ioctl_t *p_ioctl =
- (ual_query_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_av_handle_t h_av;
- ib_pd_handle_t h_pd;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_AV );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_AV );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate AV handle */
- h_av = (ib_av_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
- if( !h_av )
- {
- status = IB_INVALID_AV_HANDLE;
- goto proxy_query_av_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_query_av_err;
-
- /* h_pd is only set (by query_av) and read on the success path. */
- status = query_av( h_av, &p_ioctl->out.attr, &h_pd, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_query_av_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- /* Return proxy's PD handle when AV was created */
- p_ioctl->out.pd_context = (void*)h_pd->obj.context;
- }
- else
- {
-/* On any failure, zero the attributes and PD context. */
-proxy_query_av_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- cl_memclr( &p_ioctl->out.attr, sizeof(ib_av_attr_t) );
- p_ioctl->out.pd_context = NULL;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_av )
- deref_al_obj( &h_av->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_AV );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_MODIFY_AV:
- *
- * Applies new address-vector attributes to an existing AV.
- */
-static
-cl_status_t
-proxy_modify_av(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_modify_av_ioctl_t *p_ioctl =
- (ual_modify_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_av_handle_t h_av;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
-
- AL_ENTER( AL_DBG_AV );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_AV );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate AV handle */
- h_av = (ib_av_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
- if( !h_av )
- {
- status = IB_INVALID_AV_HANDLE;
- goto proxy_modify_av_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_modify_av_err;
-
- status = modify_av( h_av, &p_ioctl->in.attr, p_umv_buf );
-
- if( status != IB_SUCCESS )
- goto proxy_modify_av_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status != IB_SUCCESS )
- {
-/* On any failure, echo back the input umv_buf unchanged. */
-proxy_modify_av_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_av )
- deref_al_obj( &h_av->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_AV );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_DESTROY_AV:
- *
- * Destroys the AV named by the user-mode handle (asynchronous destroy —
- * no ib_sync_destroy here, unlike the PD/QP/SRQ destroy handlers).
- */
-static
-cl_status_t
-proxy_destroy_av(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_destroy_av_ioctl_t *p_ioctl =
- (ual_destroy_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_av_handle_t h_av;
-
- AL_ENTER( AL_DBG_AV );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_AV );
- return CL_INVALID_PARAMETER;
- }
-
- /* Set the return bytes in all cases */
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- /* Validate AV handle */
- h_av = (ib_av_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
- if( !h_av )
- {
- p_ioctl->out.status = IB_INVALID_AV_HANDLE;
- AL_EXIT( AL_DBG_AV );
- return CL_SUCCESS;
- }
-
- /* NOTE(review): destroy presumably consumes the al_hdl_ref reference;
- * matches the other destroy handlers in this file. */
- h_av->obj.pfn_destroy( &h_av->obj, NULL );
- p_ioctl->out.status = IB_SUCCESS;
-
- AL_EXIT( AL_DBG_AV );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Process the ioctl UAL_MODIFY_CQ:
- *
- * Resizes the CQ; on success returns the (possibly adjusted) size that
- * modify_cq reports back.
- */
-static
-cl_status_t
-proxy_modify_cq(
- IN void *p_open_context,
- IN cl_ioctl_handle_t h_ioctl,
- OUT size_t *p_ret_bytes )
-{
- ual_modify_cq_ioctl_t *p_ioctl =
- (ual_modify_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
- al_dev_open_context_t *p_context =
- (al_dev_open_context_t *)p_open_context;
- ib_cq_handle_t h_cq;
- ci_umv_buf_t *p_umv_buf = NULL;
- ib_api_status_t status;
- uint32_t size;
-
- AL_ENTER( AL_DBG_CQ );
-
- /* Validate input buffers. */
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
- {
- AL_EXIT( AL_DBG_CQ );
- return CL_INVALID_PARAMETER;
- }
-
- /* Validate CQ handle */
- h_cq = (ib_cq_handle_t)
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );
- if( !h_cq )
- {
- status = IB_INVALID_CQ_HANDLE;
- goto proxy_modify_cq_err;
- }
-
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_modify_cq_err;
-
- /* Use a local so the in-buffer request is not clobbered by modify_cq. */
- size = p_ioctl->in.size;
- status = modify_cq( h_cq, &size, p_umv_buf );
- if( status != IB_SUCCESS )
- goto proxy_modify_cq_err;
-
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
- if( status == IB_SUCCESS )
- {
- p_ioctl->out.size = size;
- }
- else
- {
-/* On any failure, report size 0 and echo back the input umv_buf. */
-proxy_modify_cq_err:
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
- p_ioctl->out.size = 0;
- }
- free_umvbuf( p_umv_buf );
-
- if( h_cq )
- deref_al_obj( &h_cq->obj );
-
- p_ioctl->out.status = status;
- *p_ret_bytes = sizeof(p_ioctl->out);
-
- AL_EXIT( AL_DBG_CQ );
- return CL_SUCCESS;
-}
-\r
-\r
-\r
-/*
- * Proxy's CQ completion callback
- *
- * Kernel-side completion callback for a user-mode-owned CQ: queues the
- * user's CQ context so UAL can retrieve it via UAL_GET_COMP_CB_INFO.
- */
-static void
-proxy_cq_comp_cb(
- IN ib_cq_handle_t h_cq,
- IN void *cq_context )
-{
- comp_cb_ioctl_info_t cb_info;
- al_dev_open_context_t *p_context = h_cq->obj.h_al->p_context;
-
- /*
- * If we're already closing the device - do not queue a callback, since
- * we're cleaning up the callback lists.
- */
- if( !proxy_context_ref( p_context ) )
- {
- proxy_context_deref( p_context );
- return;
- }
-
- /* Set up context and callback record type appropriate for UAL */
- cb_info.cq_context = cq_context;
-
- /* The proxy handle must be valid now. */
- if( !h_cq->obj.hdl_valid )
- h_cq->obj.hdl_valid = TRUE;
-
- proxy_queue_cb_buf( UAL_GET_COMP_CB_INFO, p_context, &cb_info,
- &h_cq->obj );
- 
- proxy_context_deref( p_context );
-}
-\r
-\r
-\r
-/*\r
- * Proxy's CQ error callback\r
- */\r
-static void\r
-proxy_cq_err_cb(\r
- IN ib_async_event_rec_t *p_err_rec)\r
-{\r
- ib_cq_handle_t h_cq = p_err_rec->handle.h_cq;\r
- al_dev_open_context_t *p_context = h_cq->obj.h_al->p_context;\r
- misc_cb_ioctl_info_t cb_info;\r
-\r
- /*\r
- * If we're already closing the device - do not queue a callback, since\r
- * we're cleaning up the callback lists.\r
- */\r
- if( !proxy_context_ref( p_context ) )\r
- {\r
- proxy_context_deref( p_context );\r
- return;\r
- }\r
-\r
- /* Set up context and callback record type appropriate for UAL */\r
- cb_info.rec_type = CQ_ERROR_REC;\r
- /* Return the Proxy's cq handle and the user's context */\r
- cb_info.ioctl_rec.event_rec = *p_err_rec;\r
- cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t)h_cq->obj.hdl;\r
-\r
- /* The proxy handle must be valid now. */\r
- if( !h_cq->obj.hdl_valid )\r
- h_cq->obj.hdl_valid = TRUE;\r
-\r
- proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
- &h_cq->obj );\r
- proxy_context_deref( p_context );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_CREATE_CQ:\r
- */\r
-static cl_status_t\r
-proxy_create_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_create_cq_ioctl_t *p_ioctl =\r
- (ual_create_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_ca_handle_t h_ca;\r
- ib_cq_handle_t h_cq;\r
- ib_cq_create_t cq_create;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
- ib_pfn_event_cb_t pfn_ev;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate CA handle */\r
- h_ca = (ib_ca_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );\r
- if( !h_ca )\r
- {\r
- status = IB_INVALID_CA_HANDLE;\r
- goto proxy_create_cq_err1;\r
- }\r
-\r
- cq_create.size = p_ioctl->in.size;\r
-\r
- if( p_ioctl->in.h_wait_obj )\r
- {\r
- cq_create.pfn_comp_cb = NULL;\r
- cq_create.h_wait_obj = cl_waitobj_ref( p_ioctl->in.h_wait_obj );\r
- if( !cq_create.h_wait_obj )\r
- {\r
- status = IB_INVALID_PARAMETER;\r
- goto proxy_create_cq_err1;\r
- }\r
- }\r
- else\r
- {\r
- /* Override with proxy's cq callback */\r
- cq_create.pfn_comp_cb = proxy_cq_comp_cb;\r
- cq_create.h_wait_obj = NULL;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_create_cq_err2;\r
-\r
- if( p_ioctl->in.ev_notify )\r
- pfn_ev = proxy_cq_err_cb;\r
- else\r
- pfn_ev = NULL;\r
-\r
- status = create_cq( h_ca, &cq_create, p_ioctl->in.context,\r
- pfn_ev, &h_cq, p_umv_buf );\r
-\r
- if( status != IB_SUCCESS )\r
- goto proxy_create_cq_err2;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.size = cq_create.size;\r
- p_ioctl->out.h_cq = h_cq->obj.hdl;\r
- h_cq->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_cq->obj );\r
- }\r
- else\r
- {\r
- h_cq->obj.pfn_destroy( &h_cq->obj, NULL );\r
-\r
-proxy_create_cq_err2:\r
- if( cq_create.h_wait_obj )\r
- cl_waitobj_deref( cq_create.h_wait_obj );\r
-\r
-proxy_create_cq_err1:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.h_cq = AL_INVALID_HANDLE;\r
- p_ioctl->out.size = 0;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_ca )\r
- deref_al_obj( &h_ca->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_QUERY_CQ:\r
- */\r
-static\r
-cl_status_t\r
-proxy_query_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_query_cq_ioctl_t *p_ioctl =\r
- (ual_query_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cq_handle_t h_cq;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate CQ handle */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- status = IB_INVALID_CQ_HANDLE;\r
- goto proxy_query_cq_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_cq_err;\r
-\r
- status = query_cq( h_cq, &p_ioctl->out.size, p_umv_buf );\r
-\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_cq_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
-proxy_query_cq_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.size = 0;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_cq )\r
- deref_al_obj( &h_cq->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_DESTROY_CQ\r
- */\r
-static\r
-cl_status_t\r
-proxy_destroy_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_destroy_cq_ioctl_t *p_ioctl =\r
- (ual_destroy_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cq_handle_t h_cq;\r
- cl_waitobj_handle_t h_wait_obj;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CQ handle */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CQ_HANDLE;\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
- }\r
-\r
- h_wait_obj = h_cq->h_wait_obj;\r
-\r
- h_cq->obj.pfn_destroy( &h_cq->obj, ib_sync_destroy );\r
-\r
- /* Deref the wait object, if any. */\r
- if( h_wait_obj )\r
- cl_waitobj_deref( h_wait_obj );\r
-\r
- p_ioctl->out.status = IB_SUCCESS;\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_POST_SEND\r
- */\r
-static\r
-cl_status_t\r
-proxy_post_send(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_post_send_ioctl_t *p_ioctl =\r
- (ual_post_send_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- ib_av_handle_t h_av;\r
- ib_send_wr_t *p_wr;\r
- ib_send_wr_t *p_send_failure;\r
- uintn_t i = 0;\r
- ib_local_ds_t *p_ds;\r
- uintn_t num_ds = 0;\r
- ib_api_status_t status;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_QP );\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * Additional input buffer validation based on actual settings.\r
- * Note that this validates that work requests are actually\r
- * being passed in.\r
- */\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- in_buf_sz += sizeof(ib_send_wr_t) * (p_ioctl->in.num_wr - 1);\r
- in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds;\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Setup p_send_failure to head of list. */\r
- p_send_failure = p_wr = p_ioctl->in.send_wr;\r
-\r
- /* Validate QP handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- status = IB_INVALID_QP_HANDLE;\r
- goto proxy_post_send_done;\r
- }\r
-\r
- /* Setup the base data segment pointer. */\r
- p_ds = (ib_local_ds_t*)&p_ioctl->in.send_wr[p_ioctl->in.num_wr];\r
-\r
- /* Setup the user's work requests and data segments and translate. */\r
- for( i = 0; i < p_ioctl->in.num_wr; i++ )\r
- {\r
- if( h_qp->type == IB_QPT_UNRELIABLE_DGRM )\r
- {\r
- /* Validate the AV handle for UD */\r
- h_av = (ib_av_handle_t)al_hdl_ref( p_context->h_al,\r
- (uint64_t)p_wr[i].dgrm.ud.h_av, AL_OBJ_TYPE_H_AV );\r
- if( !h_av )\r
- {\r
- status = IB_INVALID_AV_HANDLE;\r
- goto proxy_post_send_done;\r
- }\r
- /* substitute with KAL AV handle */\r
- p_wr[i].dgrm.ud.h_av = h_av;\r
- }\r
-\r
- /* Setup the data segments, if any. */\r
- if( p_wr[i].num_ds )\r
- {\r
- num_ds += p_wr[i].num_ds;\r
- if( num_ds > p_ioctl->in.num_ds )\r
- {\r
- /*\r
- * The work request submitted exceed the number of data\r
- * segments specified in the IOCTL.\r
- */\r
- status = IB_INVALID_PARAMETER;\r
- goto proxy_post_send_done;\r
- }\r
- p_wr[i].ds_array = p_ds;\r
- p_ds += p_wr->num_ds;\r
- }\r
- else\r
- {\r
- p_wr[i].ds_array = NULL;\r
- }\r
-\r
- p_wr[i].p_next = &p_wr[i + 1];\r
- }\r
-\r
- /* Mark the end of list. */\r
- p_wr[i - 1].p_next = NULL;\r
-\r
- /* so much for the set up, let's roll! */\r
- status = ib_post_send( h_qp, p_wr, &p_send_failure );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.failed_cnt = 0;\r
- }\r
- else\r
- {\r
-proxy_post_send_done:\r
- /* First set up as if all failed. */\r
- p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;\r
- /* Now subtract successful ones. */\r
- p_ioctl->out.failed_cnt -= (uint32_t)(\r
- (((uintn_t)p_send_failure) - ((uintn_t)p_wr))\r
- / sizeof(ib_send_wr_t));\r
- }\r
-\r
- /* releases the references on address vectors. */\r
- if( h_qp )\r
- {\r
- if( h_qp->type == IB_QPT_UNRELIABLE_DGRM )\r
- {\r
- while( i-- )\r
- deref_al_obj( &p_wr[i].dgrm.ud.h_av->obj );\r
- }\r
- deref_al_obj( &h_qp->obj );\r
- }\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_POST_RECV\r
- */\r
-static\r
-cl_status_t\r
-proxy_post_recv(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_post_recv_ioctl_t *p_ioctl =\r
- (ual_post_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- ib_recv_wr_t *p_wr;\r
- ib_recv_wr_t *p_recv_failure;\r
- uintn_t i;\r
- ib_local_ds_t *p_ds;\r
- uintn_t num_ds = 0;\r
- ib_api_status_t status;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_QP );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * Additional input buffer validation based on actual settings.\r
- * Note that this validates that work requests are actually\r
- * being passed in.\r
- */\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1);\r
- in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds;\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Setup p_send_failure to head of list. */\r
- p_recv_failure = p_wr = p_ioctl->in.recv_wr;\r
-\r
- /* Validate QP handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- status = IB_INVALID_QP_HANDLE;\r
- goto proxy_post_recv_done;\r
- }\r
-\r
- /* Setup the base data segment pointer. */\r
- p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr];\r
-\r
- /* Setup the user's work requests and data segments and translate. */\r
- for( i = 0; i < p_ioctl->in.num_wr; i++ )\r
- {\r
- /* Setup the data segments, if any. */\r
- if( p_wr[i].num_ds )\r
- {\r
- num_ds += p_wr[i].num_ds;\r
- if( num_ds > p_ioctl->in.num_ds )\r
- {\r
- /*\r
- * The work request submitted exceed the number of data\r
- * segments specified in the IOCTL.\r
- */\r
- status = IB_INVALID_PARAMETER;\r
- goto proxy_post_recv_done;\r
- }\r
- p_wr[i].ds_array = p_ds;\r
- p_ds += p_wr->num_ds;\r
- }\r
- else\r
- {\r
- p_wr[i].ds_array = NULL;\r
- }\r
-\r
- p_wr[i].p_next = &p_wr[i + 1];\r
- }\r
-\r
- /* Mark the end of list. */\r
- p_wr[i-1].p_next = NULL;\r
-\r
- status = ib_post_recv( h_qp, p_wr, &p_recv_failure );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.failed_cnt = 0;\r
- }\r
- else\r
- {\r
-proxy_post_recv_done:\r
- /* First set up as if all failed. */\r
- p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;\r
- /* Now subtract successful ones. */\r
- p_ioctl->out.failed_cnt -= (uint32_t)(\r
- (((uintn_t)p_recv_failure) - ((uintn_t)p_wr))\r
- / sizeof(ib_recv_wr_t));\r
- }\r
-\r
- if( h_qp )\r
- deref_al_obj( &h_qp->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_POST_SRQ_RECV\r
- */\r
-static\r
-cl_status_t\r
-proxy_post_srq_recv(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_post_srq_recv_ioctl_t *p_ioctl =\r
- (ual_post_srq_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_srq_handle_t h_srq;\r
- ib_recv_wr_t *p_wr;\r
- ib_recv_wr_t *p_recv_failure;\r
- uintn_t i;\r
- ib_local_ds_t *p_ds;\r
- uintn_t num_ds = 0;\r
- ib_api_status_t status;\r
- size_t in_buf_sz;\r
-\r
- AL_ENTER( AL_DBG_QP );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * Additional input buffer validation based on actual settings.\r
- * Note that this validates that work requests are actually\r
- * being passed in.\r
- */\r
- in_buf_sz = sizeof(p_ioctl->in);\r
- in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1);\r
- in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds;\r
- if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Setup p_send_failure to head of list. */\r
- p_recv_failure = p_wr = p_ioctl->in.recv_wr;\r
-\r
- /* Validate SRQ handle */\r
- h_srq = (ib_srq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_QP );\r
- if( !h_srq )\r
- {\r
- status = IB_INVALID_SRQ_HANDLE;\r
- goto proxy_post_recv_done;\r
- }\r
-\r
- /* Setup the base data segment pointer. */\r
- p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr];\r
-\r
- /* Setup the user's work requests and data segments and translate. */\r
- for( i = 0; i < p_ioctl->in.num_wr; i++ )\r
- {\r
- /* Setup the data segments, if any. */\r
- if( p_wr[i].num_ds )\r
- {\r
- num_ds += p_wr[i].num_ds;\r
- if( num_ds > p_ioctl->in.num_ds )\r
- {\r
- /*\r
- * The work request submitted exceed the number of data\r
- * segments specified in the IOCTL.\r
- */\r
- status = IB_INVALID_PARAMETER;\r
- goto proxy_post_recv_done;\r
- }\r
- p_wr[i].ds_array = p_ds;\r
- p_ds += p_wr->num_ds;\r
- }\r
- else\r
- {\r
- p_wr[i].ds_array = NULL;\r
- }\r
-\r
- p_wr[i].p_next = &p_wr[i + 1];\r
- }\r
-\r
- /* Mark the end of list. */\r
- p_wr[i-1].p_next = NULL;\r
-\r
- status = ib_post_srq_recv( h_srq, p_wr, &p_recv_failure );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.failed_cnt = 0;\r
- }\r
- else\r
- {\r
-proxy_post_recv_done:\r
- /* First set up as if all failed. */\r
- p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;\r
- /* Now subtract successful ones. */\r
- p_ioctl->out.failed_cnt -= (uint32_t)(\r
- (((uintn_t)p_recv_failure) - ((uintn_t)p_wr))\r
- / sizeof(ib_recv_wr_t));\r
- }\r
-\r
- if( h_srq )\r
- deref_al_obj( &h_srq->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_PEEK_CQ\r
- */\r
-static cl_status_t\r
-proxy_peek_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_peek_cq_ioctl_t *p_ioctl =\r
- (ual_peek_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cq_handle_t h_cq;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CQ handle */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CQ_HANDLE;\r
- p_ioctl->out.n_cqes = 0;\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status = ib_peek_cq( h_cq, &p_ioctl->out.n_cqes );\r
-\r
- deref_al_obj( &h_cq->obj );\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_POLL_CQ\r
- */\r
-static cl_status_t\r
-proxy_poll_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_poll_cq_ioctl_t *p_ioctl;\r
- al_dev_open_context_t *p_context;\r
- ib_cq_handle_t h_cq;\r
- ib_wc_t *p_free_wc;\r
- ib_wc_t *p_done_wc = NULL;\r
- uint32_t i, num_wc;\r
- size_t out_buf_sz;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- p_ioctl = (ual_poll_cq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );\r
- p_context = (al_dev_open_context_t*)p_open_context;\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) < sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * Additional validation of input and output sizes.\r
- * Note that this also checks that work completions are actually\r
- * being passed in.\r
- */\r
- out_buf_sz = sizeof(p_ioctl->out);\r
- out_buf_sz += sizeof(ib_wc_t) * (p_ioctl->in.num_wc - 1);\r
- if( cl_ioctl_out_size( h_ioctl ) != out_buf_sz )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate CQ handle. */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- status = IB_INVALID_CQ_HANDLE;\r
- goto proxy_poll_cq_err;\r
- }\r
-\r
- p_free_wc = p_ioctl->out.wc;\r
- num_wc = p_ioctl->in.num_wc;\r
- for( i = 0; i < num_wc; i++ )\r
- p_free_wc[i].p_next = &p_free_wc[i+1];\r
- p_free_wc[i - 1].p_next = NULL;\r
-\r
- status = ib_poll_cq( h_cq, &p_free_wc, &p_done_wc );\r
-\r
- /*\r
- * If any of the completions are done, copy to user\r
- * otherwise, just return\r
- */\r
- if( status == IB_SUCCESS )\r
- {\r
- CL_ASSERT( p_done_wc );\r
- /* Calculate the number of WCs. */\r
- if( !p_free_wc )\r
- {\r
- p_ioctl->out.num_wc = num_wc;\r
- }\r
- else\r
- {\r
- p_ioctl->out.num_wc = (uint32_t)\r
- (((uintn_t)p_free_wc) - ((uintn_t)p_done_wc)) /\r
- sizeof(ib_wc_t);\r
- }\r
- }\r
- else\r
- {\r
-proxy_poll_cq_err:\r
- p_ioctl->out.num_wc = 0;\r
- }\r
-\r
- if( h_cq )\r
- deref_al_obj( &h_cq->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out) - sizeof(ib_wc_t);\r
- if( p_ioctl->out.num_wc )\r
- *p_ret_bytes += (sizeof(ib_wc_t) * (p_ioctl->out.num_wc));\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REARM_CQ\r
- */\r
-static cl_status_t\r
-proxy_rearm_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_rearm_cq_ioctl_t *p_ioctl =\r
- (ual_rearm_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cq_handle_t h_cq;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CQ handle */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CQ_HANDLE;\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status = ib_rearm_cq( h_cq, p_ioctl->in.solicited );\r
-\r
- deref_al_obj( &h_cq->obj );\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REARM_N_CQ\r
- */\r
-static\r
-cl_status_t\r
-proxy_rearm_n_cq(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_rearm_n_cq_ioctl_t *p_ioctl =\r
- (ual_rearm_n_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_cq_handle_t h_cq;\r
-\r
- AL_ENTER( AL_DBG_CQ );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate CQ handle */\r
- h_cq = (ib_cq_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );\r
- if( !h_cq )\r
- {\r
- p_ioctl->out.status = IB_INVALID_CQ_HANDLE;\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status = ib_rearm_n_cq( h_cq, p_ioctl->in.n_cqes );\r
-\r
- deref_al_obj( &h_cq->obj );\r
-\r
- AL_EXIT( AL_DBG_CQ );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REGISTER_MEM:\r
- */\r
-static cl_status_t\r
-proxy_register_mr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_reg_mem_ioctl_t *p_ioctl =\r
- (ual_reg_mem_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_mr_handle_t h_mr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_register_mr_err;\r
- }\r
-\r
- /* Validate input region size. */\r
- if( p_ioctl->in.mem_create.length > ~((size_t)0) )\r
- {\r
- status = IB_INVALID_SETTING;\r
- goto proxy_register_mr_err;\r
- }\r
-\r
- status = reg_mem( h_pd, &p_ioctl->in.mem_create, &p_ioctl->out.lkey,\r
- &p_ioctl->out.rkey, &h_mr, TRUE );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_mr = h_mr->obj.hdl;\r
- h_mr->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_mr->obj );\r
- }\r
- else\r
- {\r
-proxy_register_mr_err:\r
- p_ioctl->out.h_mr = AL_INVALID_HANDLE;\r
- p_ioctl->out.lkey = 0;\r
- p_ioctl->out.rkey = 0;\r
- }\r
-\r
- if( h_pd )\r
- deref_al_obj( &h_pd->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_QUERY_MEM:\r
- */\r
-static cl_status_t\r
-proxy_query_mr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_query_mr_ioctl_t *p_ioctl =\r
- (ual_query_mr_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mr_handle_t h_mr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate MR handle */\r
- h_mr = (ib_mr_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR );\r
- if( !h_mr )\r
- {\r
- status = IB_INVALID_MR_HANDLE;\r
- goto proxy_query_mr_err;\r
- }\r
-\r
- status = ib_query_mr( h_mr, &p_ioctl->out.attr );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- /* Replace the pd handle with proxy's handle */\r
- p_ioctl->out.attr.h_pd =\r
- (ib_pd_handle_t)p_ioctl->out.attr.h_pd->obj.hdl;\r
- }\r
- else\r
- {\r
-proxy_query_mr_err:\r
- cl_memclr( &p_ioctl->out.attr, sizeof(ib_mr_attr_t) );\r
- }\r
-\r
- if( h_mr )\r
- deref_al_obj( &h_mr->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_MODIFY_MEM:\r
- */\r
-static cl_status_t\r
-proxy_modify_mr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_rereg_mem_ioctl_t *p_ioctl =\r
- (ual_rereg_mem_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mr_handle_t h_mr;\r
- ib_pd_handle_t h_pd = NULL;\r
- ib_mr_create_t *p_mr_create;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate MR handle */\r
- h_mr = (ib_mr_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR );\r
- if( !h_mr )\r
- {\r
- status = IB_INVALID_MR_HANDLE;\r
- goto proxy_modify_mr_err;\r
- }\r
-\r
- /* Validate input region size. */\r
- if( p_ioctl->in.mem_create.length > ~((size_t)0) )\r
- {\r
- status = IB_INVALID_SETTING;\r
- goto proxy_modify_mr_err;\r
- }\r
-\r
- if( p_ioctl->in.mem_mod_mask & IB_MR_MOD_PD )\r
- {\r
- if( !p_ioctl->in.h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_modify_mr_err;\r
- }\r
- /* This is a modify PD request, validate the PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_modify_mr_err;\r
- }\r
- }\r
- else\r
- {\r
- h_pd = NULL;\r
- }\r
-\r
- if( p_ioctl->in.mem_mod_mask != IB_MR_MOD_PD )\r
- p_mr_create = &p_ioctl->in.mem_create;\r
- else\r
- p_mr_create = NULL;\r
-\r
- status = rereg_mem( h_mr, p_ioctl->in.mem_mod_mask,\r
- p_mr_create, &p_ioctl->out.lkey, &p_ioctl->out.rkey, h_pd, TRUE );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
-proxy_modify_mr_err:\r
- p_ioctl->out.lkey = 0;\r
- p_ioctl->out.rkey = 0;\r
- }\r
-\r
- if( h_pd )\r
- deref_al_obj( &h_pd->obj );\r
-\r
- if( h_mr )\r
- deref_al_obj( &h_mr->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_REG_SHARED_MEM:\r
- */\r
-static cl_status_t\r
-proxy_shared_mr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_reg_shared_ioctl_t *p_ioctl =\r
- (ual_reg_shared_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_mr_handle_t h_mr, h_cur_mr;\r
- ib_api_status_t status;\r
- uint64_t vaddr;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /*\r
- * TODO: Must support taking an input handle that isn't\r
- * in this process's context.\r
- */\r
- /* Validate MR handle */\r
- h_cur_mr = (ib_mr_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR );\r
- if( !h_cur_mr )\r
- {\r
- h_pd = NULL;\r
- status = IB_INVALID_MR_HANDLE;\r
- goto proxy_shared_mr_err;\r
- }\r
-\r
- /* Validate the PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_shared_mr_err;\r
- }\r
-\r
- vaddr = p_ioctl->in.vaddr;\r
- status = reg_shared( h_cur_mr, h_pd,\r
- p_ioctl->in.access_ctrl, &vaddr, &p_ioctl->out.lkey,\r
- &p_ioctl->out.rkey, &h_mr, TRUE );\r
-\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_new_mr = h_mr->obj.hdl;\r
- p_ioctl->out.vaddr = vaddr;\r
- h_mr->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_mr->obj );\r
- }\r
- else\r
- {\r
-proxy_shared_mr_err:\r
- cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );\r
- }\r
-\r
- if( h_pd )\r
- deref_al_obj( &h_pd->obj );\r
-\r
- if( h_cur_mr )\r
- deref_al_obj( &h_cur_mr->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_DEREGISTER_MEM:\r
- */\r
-static cl_status_t\r
-proxy_deregister_mr(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_dereg_mr_ioctl_t *p_ioctl =\r
- (ual_dereg_mr_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mr_handle_t h_mr;\r
-\r
- AL_ENTER( AL_DBG_MR );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate MR handle */\r
- h_mr = (ib_mr_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR );\r
- if( !h_mr )\r
- {\r
- p_ioctl->out.status = IB_INVALID_MR_HANDLE;\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
- }\r
-\r
- p_ioctl->out.status = dereg_mr( h_mr );\r
-\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- deref_al_obj( &h_mr->obj );\r
-\r
- AL_EXIT( AL_DBG_MR );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_CREATE_MW:\r
- */\r
-static cl_status_t\r
-proxy_create_mw(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_create_mw_ioctl_t *p_ioctl =\r
- (ual_create_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_mw_handle_t h_mw;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MW );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate PD handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_create_mw_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_create_mw_err;\r
-\r
- status = create_mw( h_pd, &p_ioctl->out.rkey, &h_mw, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_create_mw_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_mw = h_mw->obj.hdl;\r
- h_mw->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_mw->obj );\r
- }\r
- else\r
- {\r
- h_mw->obj.pfn_destroy( &h_mw->obj, NULL );\r
-\r
-proxy_create_mw_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.rkey = 0;\r
- p_ioctl->out.h_mw = AL_INVALID_HANDLE;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_pd )\r
- deref_al_obj( &h_pd->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_QUERY_MW:\r
- */\r
-static cl_status_t\r
-proxy_query_mw(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_query_mw_ioctl_t *p_ioctl =\r
- (ual_query_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mw_handle_t h_mw;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
- ib_pd_handle_t h_pd;\r
-\r
- AL_ENTER( AL_DBG_MW );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate MW handle */\r
- h_mw = (ib_mw_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW );\r
- if( !h_mw )\r
- {\r
- status = IB_INVALID_MW_HANDLE;\r
- goto proxy_query_mw_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_mw_err;\r
-\r
- status = query_mw( h_mw, &h_pd, &p_ioctl->out.rkey, p_umv_buf );\r
-\r
- if( status != IB_SUCCESS )\r
- goto proxy_query_mw_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- /*\r
- * replace the pd handle with user's pd context for the proxy's PD.\r
- */\r
- p_ioctl->out.pd_context = (void*)h_pd->obj.context;\r
- }\r
- else\r
- {\r
-proxy_query_mw_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.pd_context = NULL;\r
- p_ioctl->out.rkey = 0;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_mw )\r
- deref_al_obj( &h_mw->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_BIND_MW:\r
- */\r
-static cl_status_t\r
-proxy_bind_mw(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_bind_mw_ioctl_t *p_ioctl =\r
- (ual_bind_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mw_handle_t h_mw;\r
- ib_qp_handle_t h_qp;\r
- ib_mr_handle_t h_mr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MW );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate MW handle */\r
- h_mw = (ib_mw_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW );\r
- if( !h_mw )\r
- {\r
- status = IB_INVALID_MW_HANDLE;\r
- goto proxy_bind_mw_err1;\r
- }\r
-\r
- /* Validate QP handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- status = IB_INVALID_QP_HANDLE;\r
- goto proxy_bind_mw_err2;\r
- }\r
-\r
- /* Validate MR handle */\r
- h_mr = (ib_mr_handle_t)al_hdl_ref( p_context->h_al,\r
- (uint64_t)p_ioctl->in.mw_bind.h_mr, AL_OBJ_TYPE_H_MR );\r
- if( !h_mr )\r
- {\r
- status = IB_INVALID_MR_HANDLE;\r
- goto proxy_bind_mw_err3;\r
- }\r
-\r
- /* Update bind attribute with the kernel space handles */\r
- p_ioctl->in.mw_bind.h_mr = h_mr;\r
-\r
- status = ib_bind_mw( h_mw, h_qp,\r
- &p_ioctl->in.mw_bind, &p_ioctl->out.r_key );\r
-\r
- deref_al_obj( &h_mr->obj );\r
-proxy_bind_mw_err3:\r
- deref_al_obj( &h_qp->obj );\r
-proxy_bind_mw_err2:\r
- deref_al_obj( &h_mw->obj );\r
-proxy_bind_mw_err1:\r
-\r
- if( status != IB_SUCCESS )\r
- p_ioctl->out.r_key = 0;\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process the ioctl UAL_DESTROY_MW:\r
- */\r
-static cl_status_t\r
-proxy_destroy_mw(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_destroy_mw_ioctl_t *p_ioctl =\r
- (ual_destroy_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_mw_handle_t h_mw;\r
-\r
- AL_ENTER( AL_DBG_MW );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate MW handle */\r
- h_mw = (ib_mw_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW );\r
- if( !h_mw )\r
- {\r
- p_ioctl->out.status = IB_INVALID_MW_HANDLE;\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_SUCCESS;\r
- }\r
- p_ioctl->out.status = destroy_mw( h_mw );\r
-\r
- if( p_ioctl->out.status != IB_SUCCESS )\r
- deref_al_obj( &h_mw->obj );\r
-\r
- AL_EXIT( AL_DBG_MW );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-cl_status_t\r
-proxy_get_spl_qp(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_spl_qp_ioctl_t *p_ioctl =\r
- (ual_spl_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_pd_handle_t h_pd;\r
- ib_qp_handle_t h_qp;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_QP );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate pd handle */\r
- h_pd = (ib_pd_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );\r
- if( !h_pd )\r
- {\r
- status = IB_INVALID_PD_HANDLE;\r
- goto proxy_get_spl_qp_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_get_spl_qp_err;\r
-\r
- /* We obtain the pool_key separately from the special QP. */\r
- status = get_spl_qp( h_pd, p_ioctl->in.port_guid,\r
- &p_ioctl->in.qp_create, p_ioctl->in.context, proxy_qp_err_cb, NULL, &h_qp, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_get_spl_qp_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_qp = h_qp->obj.hdl;\r
- h_qp->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_qp->obj );\r
- }\r
- else\r
- {\r
- h_qp->obj.pfn_destroy( &h_qp->obj, NULL );\r
-\r
-proxy_get_spl_qp_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.h_qp = AL_INVALID_HANDLE;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_pd )\r
- deref_al_obj( &h_pd->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_QP );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static cl_status_t\r
-proxy_attach_mcast(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_attach_mcast_ioctl_t *p_ioctl =\r
- (ual_attach_mcast_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- ib_qp_handle_t h_qp;\r
- al_attach_handle_t h_attach;\r
- ci_umv_buf_t *p_umv_buf = NULL;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_MCAST );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MCAST );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Validate QP handle */\r
- h_qp = (ib_qp_handle_t)\r
- al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );\r
- if( !h_qp )\r
- {\r
- status = IB_INVALID_QP_HANDLE;\r
- goto proxy_attach_mcast_err;\r
- }\r
-\r
- status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_attach_mcast_err;\r
-\r
- status = al_attach_mcast( h_qp, &p_ioctl->in.mgid,\r
- p_ioctl->in.mlid, &h_attach, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- goto proxy_attach_mcast_err;\r
-\r
- status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_ioctl->out.h_attach = h_attach->obj.hdl;\r
- h_attach->obj.hdl_valid = TRUE;\r
- deref_al_obj( &h_attach->obj );\r
- }\r
- else\r
- {\r
- h_attach->obj.pfn_destroy( &h_attach->obj, NULL );\r
-\r
-proxy_attach_mcast_err:\r
- p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;\r
- p_ioctl->out.h_attach = AL_INVALID_HANDLE;\r
- }\r
- free_umvbuf( p_umv_buf );\r
-\r
- if( h_qp )\r
- deref_al_obj( &h_qp->obj );\r
-\r
- p_ioctl->out.status = status;\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- AL_EXIT( AL_DBG_MCAST );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-static cl_status_t\r
-proxy_detach_mcast(\r
- IN void *p_open_context,\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- ual_detach_mcast_ioctl_t *p_ioctl =\r
- (ual_detach_mcast_ioctl_t *)cl_ioctl_in_buf( h_ioctl );\r
- al_dev_open_context_t *p_context =\r
- (al_dev_open_context_t *)p_open_context;\r
- al_attach_handle_t h_attach;\r
-\r
- AL_ENTER( AL_DBG_MCAST );\r
-\r
- /* Validate input buffers. */\r
- if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||\r
- cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||\r
- cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )\r
- {\r
- AL_EXIT( AL_DBG_MCAST );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- /* Set the return bytes in all cases */\r
- *p_ret_bytes = sizeof(p_ioctl->out);\r
-\r
- /* Validate mcast handle */\r
- h_attach = (al_attach_handle_t)al_hdl_ref(\r
- p_context->h_al, p_ioctl->in.h_attach, AL_OBJ_TYPE_H_ATTACH );\r
- if( !h_attach )\r
- {\r
- p_ioctl->out.status = IB_INVALID_MCAST_HANDLE;\r
- AL_EXIT( AL_DBG_MCAST );\r
- return CL_SUCCESS;\r
- }\r
-\r
- h_attach->obj.pfn_destroy( &h_attach->obj, ib_sync_destroy );\r
- p_ioctl->out.status = IB_SUCCESS;\r
-\r
- AL_EXIT( AL_DBG_MCAST );\r
- return CL_SUCCESS;\r
-}\r
-\r
-\r
-\r
-cl_status_t\r
-verbs_ioctl(\r
- IN cl_ioctl_handle_t h_ioctl,\r
- OUT size_t *p_ret_bytes )\r
-{\r
- cl_status_t cl_status;\r
- IO_STACK_LOCATION *p_io_stack;\r
- void *p_context;\r
-\r
- AL_ENTER( AL_DBG_DEV );\r
-\r
- p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );\r
- p_context = p_io_stack->FileObject->FsContext;\r
-\r
- if( !p_context )\r
- {\r
- AL_EXIT( AL_DBG_DEV );\r
- return CL_INVALID_PARAMETER;\r
- }\r
-\r
- switch( cl_ioctl_ctl_code( h_ioctl ) )\r
- {\r
- case UAL_OPEN_CA:\r
- cl_status = proxy_open_ca( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_CA:\r
- cl_status = proxy_query_ca( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_CA:\r
- cl_status = proxy_modify_ca( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CI_CALL:\r
- cl_status = proxy_ci_call( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_ALLOC_PD:\r
- cl_status = proxy_alloc_pd( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CREATE_AV:\r
- cl_status = proxy_create_av( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_AV:\r
- cl_status = proxy_query_av( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_AV:\r
- cl_status = proxy_modify_av( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CREATE_SRQ:\r
- cl_status = proxy_create_srq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_SRQ:\r
- cl_status = proxy_query_srq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_SRQ:\r
- cl_status = proxy_modify_srq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_SRQ:\r
- cl_status = proxy_destroy_srq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_POST_SRQ_RECV:\r
- cl_status = proxy_post_srq_recv( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CREATE_QP:\r
- cl_status = proxy_create_qp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_QP:\r
- cl_status = proxy_query_qp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_QP:\r
- cl_status = proxy_modify_qp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CREATE_CQ:\r
- cl_status = proxy_create_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_CQ:\r
- cl_status = proxy_query_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_CQ:\r
- cl_status = proxy_modify_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REG_MR:\r
- cl_status = proxy_register_mr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_MR:\r
- cl_status = proxy_query_mr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_MODIFY_MR:\r
- cl_status = proxy_modify_mr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REG_SHARED:\r
- cl_status = proxy_shared_mr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CREATE_MW:\r
- cl_status = proxy_create_mw( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_QUERY_MW:\r
- cl_status = proxy_query_mw( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_BIND_MW:\r
- cl_status = proxy_bind_mw( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_POST_SEND:\r
- cl_status = proxy_post_send( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_POST_RECV:\r
- cl_status = proxy_post_recv( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_PEEK_CQ:\r
- cl_status = proxy_peek_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_POLL_CQ:\r
- cl_status = proxy_poll_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REARM_CQ:\r
- cl_status = proxy_rearm_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_REARM_N_CQ:\r
- cl_status = proxy_rearm_n_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_ATTACH_MCAST:\r
- cl_status = proxy_attach_mcast( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_GET_SPL_QP_ALIAS:\r
- cl_status = proxy_get_spl_qp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_CLOSE_CA:\r
- cl_status = proxy_close_ca( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DEALLOC_PD:\r
- cl_status = proxy_dealloc_pd( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_AV:\r
- cl_status = proxy_destroy_av( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_QP:\r
- cl_status = proxy_destroy_qp( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_CQ:\r
- cl_status = proxy_destroy_cq( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DEREG_MR:\r
- cl_status = proxy_deregister_mr( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DESTROY_MW:\r
- cl_status = proxy_destroy_mw( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_DETACH_MCAST:\r
- cl_status = proxy_detach_mcast( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- case UAL_GET_VENDOR_LIBCFG:\r
- cl_status =\r
- proxy_get_vendor_libcfg( p_context, h_ioctl, p_ret_bytes );\r
- break;\r
- default:\r
- cl_status = CL_INVALID_PARAMETER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_DEV );\r
- return cl_status;\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_sa_req.c 552 2006-11-28 07:37:23Z sleybo $\r
- */\r
-\r
-#include <iba/ib_al.h>\r
-#include <complib/cl_timer.h>\r
-\r
-#include "al.h"\r
-#include "al_ca.h"\r
-#include "al_common.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_sa_req.tmh"\r
-#endif\r
-#include "al_mgr.h"\r
-#include "al_query.h"\r
-#include "ib_common.h"\r
-\r
-\r
-/* Global SA request manager */\r
-typedef struct _sa_req_mgr\r
-{\r
- al_obj_t obj; /* Child of gp_al_mgr */\r
- ib_pnp_handle_t h_pnp; /* Handle for CA PnP events */\r
-\r
-} sa_req_mgr_t;\r
-\r
-\r
-static sa_req_mgr_t *gp_sa_req_mgr = NULL;\r
-\r
-\r
-\r
-/*\r
- * Function prototypes.\r
- */\r
-void\r
-destroying_sa_req_mgr(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-free_sa_req_mgr(\r
- IN al_obj_t* p_obj );\r
-\r
-ib_api_status_t\r
-sa_req_mgr_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec );\r
-\r
-ib_api_status_t\r
-create_sa_req_svc(\r
- IN ib_pnp_port_rec_t* p_pnp_rec );\r
-\r
-void\r
-destroying_sa_req_svc(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-free_sa_req_svc(\r
- IN al_obj_t* p_obj );\r
-\r
-ib_api_status_t\r
-init_sa_req_svc(\r
- IN sa_req_svc_t* p_sa_req_svc,\r
- IN const ib_pnp_port_rec_t *p_pnp_rec );\r
-\r
-void\r
-sa_req_send_comp_cb(\r
- IN const ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad );\r
-\r
-void\r
-sa_req_recv_comp_cb(\r
- IN const ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad );\r
-\r
-void\r
-sa_req_svc_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec );\r
-\r
-\r
-/*\r
- * Create the sa_req manager.\r
- */\r
-ib_api_status_t\r
-create_sa_req_mgr(\r
- IN al_obj_t* const p_parent_obj )\r
-{\r
- ib_pnp_req_t pnp_req;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SA_REQ );\r
- CL_ASSERT( p_parent_obj );\r
- CL_ASSERT( gp_sa_req_mgr == NULL );\r
-\r
- gp_sa_req_mgr = cl_zalloc( sizeof( sa_req_mgr_t ) );\r
- if( gp_sa_req_mgr == NULL )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_zalloc failed\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Construct the sa_req manager. */\r
- construct_al_obj( &gp_sa_req_mgr->obj, AL_OBJ_TYPE_SA_REQ_SVC );\r
-\r
- /* Initialize the global sa_req manager object. */\r
- status = init_al_obj( &gp_sa_req_mgr->obj, gp_sa_req_mgr, TRUE,\r
- destroying_sa_req_mgr, NULL, free_sa_req_mgr );\r
- if( status != IB_SUCCESS )\r
- {\r
- free_sa_req_mgr( &gp_sa_req_mgr->obj );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_spinlock_init failed\n") );\r
- return status;\r
- }\r
- status = attach_al_obj( p_parent_obj, &gp_sa_req_mgr->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Register for CA PnP events. */\r
- cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );\r
- pnp_req.pnp_class = IB_PNP_PORT;\r
- pnp_req.pnp_context = &gp_sa_req_mgr->obj;\r
- pnp_req.pfn_pnp_cb = sa_req_mgr_pnp_cb;\r
-\r
- status = ib_reg_pnp( gh_al, &pnp_req, &gp_sa_req_mgr->h_pnp );\r
- if (status != IB_SUCCESS)\r
- {\r
- gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_reg_pnp failed: %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /*\r
- * Note that we don't release the reference from init_al_obj because\r
- * we need a reference on behalf of the ib_reg_pnp call. This avoids\r
- * a call to ref_al_obj and deref_al_obj.\r
- */\r
-\r
- AL_EXIT( AL_DBG_SA_REQ );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Pre-destroy the sa_req manager.\r
- */\r
-void\r
-destroying_sa_req_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_api_status_t status;\r
-\r
- CL_ASSERT( p_obj );\r
- CL_ASSERT( gp_sa_req_mgr == PARENT_STRUCT( p_obj, sa_req_mgr_t, obj ) );\r
- UNUSED_PARAM( p_obj );\r
-\r
- /* Deregister for PnP events. */\r
- if( gp_sa_req_mgr->h_pnp )\r
- {\r
- status = ib_dereg_pnp( gp_sa_req_mgr->h_pnp,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free the sa_req manager.\r
- */\r
-void\r
-free_sa_req_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- CL_ASSERT( p_obj );\r
- CL_ASSERT( gp_sa_req_mgr == PARENT_STRUCT( p_obj, sa_req_mgr_t, obj ) );\r
- UNUSED_PARAM( p_obj );\r
-\r
- destroy_al_obj( &gp_sa_req_mgr->obj );\r
- cl_free( gp_sa_req_mgr );\r
- gp_sa_req_mgr = NULL;\r
-}\r
-\r
-\r
-\r
-/*\r
- * SA request manager PnP event callback.\r
- */\r
-ib_api_status_t\r
-sa_req_mgr_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec )\r
-{\r
- sa_req_svc_t *p_sa_req_svc;\r
- ib_av_attr_t av_attr;\r
- ib_pd_handle_t h_pd;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SA_REQ );\r
- CL_ASSERT( p_pnp_rec );\r
- CL_ASSERT( p_pnp_rec->pnp_context == &gp_sa_req_mgr->obj );\r
-\r
- /* Dispatch based on the PnP event type. */\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_PORT_ADD:\r
- status = create_sa_req_svc( (ib_pnp_port_rec_t*)p_pnp_rec );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("create_sa_req_svc failed: %s\n", ib_get_err_str(status)) );\r
- }\r
- break;\r
-\r
- case IB_PNP_PORT_REMOVE:\r
- CL_ASSERT( p_pnp_rec->context );\r
- p_sa_req_svc = p_pnp_rec->context;\r
- ref_al_obj( &p_sa_req_svc->obj );\r
- p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL );\r
- p_pnp_rec->context = NULL;\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- case IB_PNP_PORT_ACTIVE:\r
- case IB_PNP_SM_CHANGE:\r
- CL_ASSERT( p_pnp_rec->context );\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ,\r
- ("updating SM information\n") );\r
-\r
- p_sa_req_svc = p_pnp_rec->context;\r
- p_sa_req_svc->sm_lid =\r
- ((ib_pnp_port_rec_t*)p_pnp_rec)->p_port_attr->sm_lid;\r
- p_sa_req_svc->sm_sl =\r
- ((ib_pnp_port_rec_t*)p_pnp_rec)->p_port_attr->sm_sl;\r
-\r
- /* Update the address vector. */\r
- status = ib_query_av( p_sa_req_svc->h_av, &av_attr, &h_pd );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("AV query failed: %s\n", ib_get_err_str(status)) );\r
- status = IB_SUCCESS;\r
- break;\r
- }\r
-\r
- av_attr.dlid = p_sa_req_svc->sm_lid;\r
- av_attr.sl = p_sa_req_svc->sm_sl;\r
- status = ib_modify_av( p_sa_req_svc->h_av, &av_attr );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("modify AV failed: %s\n", ib_get_err_str(status) ) );\r
- status = IB_SUCCESS;\r
- break;\r
- }\r
- break;\r
-\r
- case IB_PNP_PORT_INIT:\r
- case IB_PNP_PORT_ARMED:\r
- case IB_PNP_PORT_DOWN:\r
- CL_ASSERT( p_pnp_rec->context );\r
- p_sa_req_svc = p_pnp_rec->context;\r
- p_sa_req_svc->sm_lid = 0;\r
- p_sa_req_svc->sm_sl = 0;\r
-\r
- default:\r
- status = IB_SUCCESS;\r
- break;\r
- }\r
- AL_EXIT( AL_DBG_SA_REQ );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Create an sa_req service.\r
- */\r
-ib_api_status_t\r
-create_sa_req_svc(\r
- IN ib_pnp_port_rec_t* p_pnp_rec )\r
-{\r
- sa_req_svc_t* p_sa_req_svc;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SA_REQ );\r
- CL_ASSERT( p_pnp_rec );\r
- CL_ASSERT( p_pnp_rec->p_ca_attr );\r
- CL_ASSERT( p_pnp_rec->p_port_attr );\r
-\r
- p_sa_req_svc = cl_zalloc( sizeof( sa_req_svc_t ) );\r
- if( p_sa_req_svc == NULL )\r
- return IB_INSUFFICIENT_MEMORY;\r
-\r
- /* Construct the sa_req service object. */\r
- construct_al_obj( &p_sa_req_svc->obj, AL_OBJ_TYPE_SA_REQ_SVC );\r
-\r
- /* Initialize the sa_req service object. */\r
- status = init_al_obj( &p_sa_req_svc->obj, p_sa_req_svc, TRUE,\r
- destroying_sa_req_svc, NULL, free_sa_req_svc );\r
- if( status != IB_SUCCESS )\r
- {\r
- free_sa_req_svc( &p_sa_req_svc->obj );\r
- return status;\r
- }\r
-\r
- /* Attach to the sa_req_mgr. */\r
- status = attach_al_obj( &gp_sa_req_mgr->obj, &p_sa_req_svc->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- /* Allocate a QP alias and MAD service to send queries on. */\r
- status = init_sa_req_svc( p_sa_req_svc, p_pnp_rec );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("init_sa_req_svc failed: %s\n", ib_get_err_str(status) ) );\r
- return status;\r
- }\r
-\r
- /* Set the context of the PnP event to this child object. */\r
- p_pnp_rec->pnp_rec.context = p_sa_req_svc;\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_sa_req_svc->obj );\r
-\r
- AL_EXIT( AL_DBG_SA_REQ );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Pre-destroy a sa_req service.\r
- */\r
-void\r
-destroying_sa_req_svc(\r
- IN al_obj_t* p_obj )\r
-{\r
- sa_req_svc_t* p_sa_req_svc;\r
- ib_api_status_t status;\r
-\r
- CL_ASSERT( p_obj );\r
- p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj );\r
-\r
- /* Destroy the AV. */\r
- if( p_sa_req_svc->h_av )\r
- ib_destroy_av( p_sa_req_svc->h_av );\r
-\r
- /* Destroy the QP. */\r
- if( p_sa_req_svc->h_qp )\r
- {\r
- status = ib_destroy_qp( p_sa_req_svc->h_qp,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free a sa_req service.\r
- */\r
-void\r
-free_sa_req_svc(\r
- IN al_obj_t* p_obj )\r
-{\r
- sa_req_svc_t* p_sa_req_svc;\r
-\r
- CL_ASSERT( p_obj );\r
- p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj );\r
-\r
- destroy_al_obj( p_obj );\r
- cl_free( p_sa_req_svc );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Initialize an sa_req service.\r
- */\r
-ib_api_status_t\r
-init_sa_req_svc(\r
- IN sa_req_svc_t *p_sa_req_svc,\r
- IN const ib_pnp_port_rec_t *p_pnp_rec )\r
-{\r
- ib_qp_create_t qp_create;\r
- ib_mad_svc_t mad_svc;\r
- ib_api_status_t status;\r
- ib_ca_handle_t h_ca;\r
- ib_av_attr_t av_attr;\r
-\r
- AL_ENTER( AL_DBG_SA_REQ );\r
- CL_ASSERT( p_sa_req_svc && p_pnp_rec );\r
-\r
- /* Acquire the correct CI CA for this port. */\r
- h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
- if( !h_ca )\r
- {\r
- AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, ("Failed to acquire CA\n") );\r
- return IB_INVALID_GUID;\r
- }\r
- p_sa_req_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
-\r
- /* Record which port this service operates on. */\r
- p_sa_req_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
- p_sa_req_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
- p_sa_req_svc->sm_lid = p_pnp_rec->p_port_attr->sm_lid;\r
- p_sa_req_svc->sm_sl = p_pnp_rec->p_port_attr->sm_sl;\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ,\r
- ("using port: 0x%x\tsm lid: 0x%x\tsm sl: 0x%x\n",\r
- p_sa_req_svc->port_num, p_sa_req_svc->sm_lid, p_sa_req_svc->sm_sl) );\r
-\r
- /* Create the QP. */\r
- cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
- qp_create.qp_type = IB_QPT_QP1_ALIAS;\r
- qp_create.sq_depth = p_pnp_rec->p_ca_attr->max_wrs;\r
- qp_create.rq_depth = 0;\r
- qp_create.sq_sge = 1;\r
- qp_create.rq_sge = 0;\r
- qp_create.h_sq_cq = NULL;\r
- qp_create.h_rq_cq = NULL;\r
- qp_create.sq_signaled = TRUE;\r
-\r
- status = ib_get_spl_qp( h_ca->obj.p_ci_ca->h_pd_alias,\r
- p_sa_req_svc->port_guid, &qp_create, p_sa_req_svc,\r
- sa_req_svc_event_cb, &p_sa_req_svc->pool_key, &p_sa_req_svc->h_qp );\r
-\r
- /*\r
- * Release the CI CA once we've allocated the QP. The CI CA will not\r
- * go away while we hold the QP.\r
- */\r
- deref_al_obj( &h_ca->obj );\r
-\r
- /* Check for failure allocating the QP. */\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("failed to create QP1 alias: %s\n", ib_get_err_str(status) ) );\r
- return status;\r
- }\r
-\r
- /* Reference the sa_req service on behalf of QP alias. */\r
- ref_al_obj( &p_sa_req_svc->obj );\r
-\r
- /* Create a MAD service. */\r
- cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
- mad_svc.mad_svc_context = p_sa_req_svc;\r
- mad_svc.pfn_mad_send_cb = sa_req_send_comp_cb;\r
- mad_svc.pfn_mad_recv_cb = sa_req_recv_comp_cb;\r
-\r
- status = ib_reg_mad_svc( p_sa_req_svc->h_qp, &mad_svc,\r
- &p_sa_req_svc->h_mad_svc );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("failed to register MAD service: %s\n", ib_get_err_str(status) ) );\r
- return status;\r
- }\r
-\r
- /* Create an address vector for the SA. */\r
- av_attr.port_num = p_sa_req_svc->port_num;\r
- av_attr.sl = p_sa_req_svc->sm_sl;\r
- av_attr.dlid = 1;\r
- av_attr.grh_valid = FALSE;\r
- av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
- av_attr.path_bits = 0;\r
-\r
- status = ib_create_av( p_sa_req_svc->obj.p_ci_ca->h_pd_alias,\r
- &av_attr, &p_sa_req_svc->h_av );\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("failed to create AV: %s\n", ib_get_err_str(status) ) );\r
- return status;\r
- }\r
-\r
- AL_EXIT( AL_DBG_SA_REQ );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * SA request service asynchronous event callback. Our QP is an alias,\r
- * so if we've received an error, the QP is unusable.\r
- */\r
-void\r
-sa_req_svc_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec )\r
-{\r
- sa_req_svc_t *p_sa_req_svc;\r
-\r
- CL_ASSERT( p_event_rec );\r
- CL_ASSERT( p_event_rec->context );\r
-\r
- p_sa_req_svc = p_event_rec->context;\r
- ref_al_obj( &p_sa_req_svc->obj );\r
- p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Acquire the sa_req service for the given port.\r
- */\r
-static sa_req_svc_t*\r
-acquire_sa_req_svc(\r
- IN const ib_net64_t port_guid )\r
-{\r
- cl_list_item_t *p_list_item;\r
- sa_req_svc_t *p_sa_req_svc;\r
- al_obj_t *p_obj;\r
-\r
- CL_ASSERT( gp_sa_req_mgr );\r
-\r
- /* Search for the sa_req service for the given port. */\r
- cl_spinlock_acquire( &gp_sa_req_mgr->obj.lock );\r
- for( p_list_item = cl_qlist_head( &gp_sa_req_mgr->obj.obj_list );\r
- p_list_item != cl_qlist_end( &gp_sa_req_mgr->obj.obj_list );\r
- p_list_item = cl_qlist_next( p_list_item ) )\r
- {\r
- p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
- p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj );\r
-\r
- /* Make sure that the REQ service isn't being destroyed. */\r
- if( p_sa_req_svc->obj.state != CL_INITIALIZED || !p_sa_req_svc->sm_lid )\r
- continue;\r
-\r
- /* Check for a port match. */\r
- if( p_sa_req_svc->port_guid == port_guid )\r
- {\r
- /* Reference the service on behalf of the client. */\r
- ref_al_obj( &p_sa_req_svc->obj );\r
- cl_spinlock_release( &gp_sa_req_mgr->obj.lock );\r
- return p_sa_req_svc;\r
- }\r
- }\r
- cl_spinlock_release( &gp_sa_req_mgr->obj.lock );\r
-\r
- return NULL;\r
-}\r
-\r
-\r
-\r
/*
 * Build and send an SA (subnet administration) request MAD on the port
 * identified by port_guid.  The MAD contents come from p_sa_req_data;
 * completion (success, timeout, or cancellation) is reported through
 * p_sa_req->pfn_sa_req_cb.  When IB_FLAGS_SYNC is set the call blocks
 * until that callback has run.
 *
 * Returns IB_INVALID_SETTING for a synchronous call from a non-blockable
 * context, IB_INVALID_GUID when no sa_req service exists for the port,
 * otherwise the status of the MAD allocation/send.
 */
ib_api_status_t
al_send_sa_req(
	IN al_sa_req_t *p_sa_req,
	IN const net64_t port_guid,
	IN const uint32_t timeout_ms,
	IN const uint32_t retry_cnt,
	IN const ib_user_query_t* const p_sa_req_data,
	IN const ib_al_flags_t flags )
{
	ib_api_status_t status;
	sa_req_svc_t *p_sa_req_svc;
	ib_mad_element_t *p_mad_request;
	ib_mad_t *p_mad_hdr;
	ib_sa_mad_t *p_sa_mad;
	KEVENT event;

	AL_ENTER( AL_DBG_SA_REQ );

	if( flags & IB_FLAGS_SYNC )
	{
		/* Synchronous mode waits on a kernel event signalled by the
		 * completion callbacks; the caller must be able to block. */
		if( !cl_is_blockable() )
		{
			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("Thread context not blockable\n") );
			return IB_INVALID_SETTING;
		}

		KeInitializeEvent( &event, NotificationEvent, FALSE );
		p_sa_req->p_sync_event = &event;
	}
	else
	{
		p_sa_req->p_sync_event = NULL;
	}

	/* Locate the sa_req service to issue the sa_req on.  This takes a
	 * reference on the service; it is released on any failure below or
	 * by the send/receive completion callbacks. */
	p_sa_req->p_sa_req_svc = acquire_sa_req_svc( port_guid );
	if( !p_sa_req->p_sa_req_svc )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("invalid port GUID\n") );
		return IB_INVALID_GUID;
	}

	/* Get a MAD element for the send request. */
	p_sa_req_svc = p_sa_req->p_sa_req_svc;
	status = ib_get_mad( p_sa_req_svc->pool_key, MAD_BLOCK_SIZE,
		&p_mad_request );
	if( status != IB_SUCCESS )
	{
		deref_al_obj( &p_sa_req_svc->obj );
		AL_EXIT( AL_DBG_SA_REQ );
		return status;
	}

	/* Store the MAD request so it can be cancelled. */
	p_sa_req->p_mad_request = p_mad_request;

	/* Initialize the MAD buffer for the send operation. */
	p_mad_hdr = p_sa_req->p_mad_request->p_mad_buf;
	p_sa_mad = (ib_sa_mad_t*)p_mad_hdr;

	/* Initialize the standard MAD header.  The transaction ID comes from
	 * a per-service atomic counter so responses can be matched. */
	ib_mad_init_new( p_mad_hdr, IB_MCLASS_SUBN_ADM, (uint8_t)2,
		p_sa_req_data->method,
		cl_hton64( (uint64_t)cl_atomic_inc( &p_sa_req_svc->trans_id ) ),
		0, 0 );

	/* Set the query information. */
	p_sa_mad->attr_id = p_sa_req_data->attr_id;
	p_sa_mad->attr_offset = ib_get_attr_offset( p_sa_req_data->attr_size );
	p_sa_mad->comp_mask = p_sa_req_data->comp_mask;
	/*
	 * Most set operations don't use the component mask.
	 * Check the method and copy the attributes if it's a set too.
	 */
	if( p_sa_mad->comp_mask || p_sa_mad->method == IB_MAD_METHOD_SET )
	{
		cl_memcpy( p_sa_mad->data, p_sa_req_data->p_attr,
			p_sa_req_data->attr_size );
	}

	/* Set the MAD element information. */
	p_mad_request->context1 = p_sa_req;
	p_mad_request->send_context1 = p_sa_req;
	p_mad_request->remote_qp = IB_QP1;
	p_mad_request->h_av = p_sa_req_svc->h_av;
	p_mad_request->send_opt = 0;
	p_mad_request->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
	p_mad_request->resp_expected = TRUE;
	p_mad_request->timeout_ms = timeout_ms;
	p_mad_request->retry_cnt = retry_cnt;

	status = ib_send_mad( p_sa_req_svc->h_mad_svc, p_mad_request, NULL );
	if( status != IB_SUCCESS )
	{
		/* Send failed outright: reclaim the MAD and drop the service
		 * reference taken in acquire_sa_req_svc. */
		p_sa_req->p_mad_request = NULL;
		ib_put_mad( p_mad_request );
		deref_al_obj( &p_sa_req->p_sa_req_svc->obj );
	}
	else if( flags & IB_FLAGS_SYNC )
	{
		/* Wait for the MAD completion. */
		KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL );
	}

	AL_EXIT( AL_DBG_SA_REQ );
	return status;
}
-\r
-\r
-\r
/*
 * SA request send completion callback.  On a successful send nothing is
 * reported here — the response arrives through the receive callback.
 * On a failed send (timeout, cancellation, ...) the requestor's callback
 * is invoked with no response, any synchronous waiter is released, and
 * the sa_req service reference is dropped.  The request MAD is returned
 * to its pool in either case.
 */
void
sa_req_send_comp_cb(
	IN const ib_mad_svc_handle_t h_mad_svc,
	IN void *mad_svc_context,
	IN ib_mad_element_t *p_request_mad )
{
	al_sa_req_t *p_sa_req;
	sa_req_svc_t *p_sa_req_svc;
	KEVENT *p_sync_event;

	AL_ENTER( AL_DBG_SA_REQ );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );

	/*
	 * Check the result of the send operation. If the send was successful,
	 * we will be getting a receive callback with the response.
	 */
	if( p_request_mad->status != IB_WCS_SUCCESS )
	{
		/* Notify the requestor of the result. */
		AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_QUERY,
			("request failed - notifying user\n") );

		/* Capture the fields needed after the user callback first;
		 * presumably the callback may release p_sa_req. */
		p_sa_req = p_request_mad->send_context1;
		p_sa_req_svc = p_sa_req->p_sa_req_svc;
		p_sync_event = p_sa_req->p_sync_event;

		p_sa_req->status = convert_wc_status( p_request_mad->status );
		p_sa_req->pfn_sa_req_cb( p_sa_req, NULL );
		if( p_sync_event )
			KeSetEvent( p_sync_event, 0, FALSE );
		/* Drop the service reference taken in acquire_sa_req_svc. */
		deref_al_obj( &p_sa_req_svc->obj );
	}

	/* Return the MAD. */
	ib_put_mad( p_request_mad );

	AL_EXIT( AL_DBG_SA_REQ );
}
-\r
-\r
-\r
/*
 * SA request receive completion callback.  Runs when the SA's response
 * MAD arrives: translates the SA MAD status into an AL status, hands the
 * response to the requestor's callback, wakes any synchronous waiter,
 * and drops the service reference taken when the request was sent.
 * Ownership of p_mad_response passes to the requestor's callback.
 */
void
sa_req_recv_comp_cb(
	IN const ib_mad_svc_handle_t h_mad_svc,
	IN void *mad_svc_context,
	IN ib_mad_element_t *p_mad_response )
{
	al_sa_req_t *p_sa_req;
	sa_req_svc_t *p_sa_req_svc;
	ib_sa_mad_t *p_sa_mad;
	KEVENT *p_sync_event;

	AL_ENTER( AL_DBG_SA_REQ );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );

	/* Capture fields up front; presumably the user callback may release
	 * p_sa_req. */
	p_sa_req = p_mad_response->send_context1;
	p_sa_req_svc = p_sa_req->p_sa_req_svc;
	p_sync_event = p_sa_req->p_sync_event;

	//*** check for SA redirection... (TODO left unimplemented by the author)

	/* Record the results of the request. */
	p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_mad_response );
	if( p_sa_mad->status == IB_SA_MAD_STATUS_SUCCESS )
		p_sa_req->status = IB_SUCCESS;
	else
		p_sa_req->status = IB_REMOTE_ERROR;

	/* Notify the requestor of the result. */
	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, ("notifying user\n") );
	p_sa_req->pfn_sa_req_cb( p_sa_req, p_mad_response );
	if( p_sync_event )
		KeSetEvent( p_sync_event, 0, FALSE );
	deref_al_obj( &p_sa_req_svc->obj );

	AL_EXIT( AL_DBG_SA_REQ );
}
-\r
-\r
-\r
-ib_api_status_t\r
-convert_wc_status(\r
- IN const ib_wc_status_t wc_status )\r
-{\r
- switch( wc_status )\r
- {\r
- case IB_WCS_SUCCESS:\r
- return IB_SUCCESS;\r
-\r
- case IB_WCS_TIMEOUT_RETRY_ERR:\r
- return IB_TIMEOUT;\r
-\r
- case IB_WCS_CANCELED:\r
- return IB_CANCELED;\r
-\r
- default:\r
- return IB_ERROR;\r
- }\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- * Copyright (c) 2006 Voltaire Corporation. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_smi.c 744 2007-07-31 19:04:15Z leonidk $\r
- */\r
-\r
-\r
-#include <iba/ib_al.h>\r
-#include <complib/cl_timer.h>\r
-\r
-#include "ib_common.h"\r
-#include "al_common.h"\r
-#include "al_debug.h"\r
-#if defined(EVENT_TRACING)\r
-#ifdef offsetof\r
-#undef offsetof\r
-#endif\r
-#include "al_smi.tmh"\r
-#endif\r
-#include "al_verbs.h"\r
-#include "al_mgr.h"\r
-#include "al_pnp.h"\r
-#include "al_qp.h"\r
-#include "al_smi.h"\r
-#include "al_av.h"\r
-\r
-\r
/* Node description string; defined elsewhere in AL. */
extern char node_desc[IB_NODE_DESCRIPTION_SIZE];

/* Tuning defaults for the special QP services. */
#define SMI_POLL_INTERVAL 20000 /* Milliseconds */
#define LOCAL_MAD_TIMEOUT 50 /* Milliseconds */
#define DEFAULT_QP0_DEPTH 256
#define DEFAULT_QP1_DEPTH 1024

/* Active SMI poll interval; zero disables the poll timer (checked in
 * create_spl_qp_svc before starting it). */
uint32_t g_smi_poll_interval = SMI_POLL_INTERVAL;
/* Singleton special QP manager, allocated by create_spl_qp_mgr. */
spl_qp_mgr_t* gp_spl_qp_mgr = NULL;
-\r
-\r
-/*\r
- * Function prototypes.\r
- */\r
-void\r
-destroying_spl_qp_mgr(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-free_spl_qp_mgr(\r
- IN al_obj_t* p_obj );\r
-\r
-ib_api_status_t\r
-spl_qp0_agent_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec );\r
-\r
-ib_api_status_t\r
-spl_qp1_agent_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec );\r
-\r
-ib_api_status_t\r
-spl_qp_agent_pnp(\r
- IN ib_pnp_rec_t* p_pnp_rec,\r
- IN ib_qp_type_t qp_type );\r
-\r
-ib_api_status_t\r
-create_spl_qp_svc(\r
- IN ib_pnp_port_rec_t* p_pnp_rec,\r
- IN const ib_qp_type_t qp_type );\r
-\r
-void\r
-destroying_spl_qp_svc(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-free_spl_qp_svc(\r
- IN al_obj_t* p_obj );\r
-\r
-void\r
-spl_qp_svc_lid_change(\r
- IN al_obj_t* p_obj,\r
- IN ib_pnp_port_rec_t* p_pnp_rec );\r
-\r
-ib_api_status_t\r
-remote_mad_send(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr );\r
-\r
-static ib_api_status_t\r
-local_mad_send(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr );\r
-\r
-static ib_api_status_t\r
-loopback_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr );\r
-\r
-static ib_api_status_t\r
-__process_subn_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr );\r
-\r
-static ib_api_status_t\r
-fwd_local_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr );\r
-\r
-void\r
-send_local_mad_cb(\r
- IN cl_async_proc_item_t* p_item );\r
-\r
-void\r
-spl_qp_send_comp_cb(\r
- IN const ib_cq_handle_t h_cq,\r
- IN void *cq_context );\r
-\r
-void\r
-spl_qp_recv_comp_cb(\r
- IN const ib_cq_handle_t h_cq,\r
- IN void *cq_context );\r
-\r
-void\r
-spl_qp_comp(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN const ib_cq_handle_t h_cq,\r
- IN ib_wc_type_t wc_type );\r
-\r
-ib_api_status_t\r
-process_mad_recv(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-mad_route_t\r
-route_recv_smp(\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-mad_route_t\r
-route_recv_smp_attr(\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-mad_route_t\r
-route_recv_dm_mad(\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-mad_route_t\r
-route_recv_gmp(\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-mad_route_t\r
-route_recv_gmp_attr(\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-ib_api_status_t\r
-forward_sm_trap(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_element );\r
-\r
-ib_api_status_t\r
-recv_local_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_request );\r
-\r
-void\r
-spl_qp_alias_send_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad_element );\r
-\r
-void\r
-spl_qp_alias_recv_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void *mad_svc_context,\r
- IN ib_mad_element_t *p_mad_response );\r
-\r
-static ib_api_status_t\r
-spl_qp_svc_post_recvs(\r
- IN spl_qp_svc_t* const p_spl_qp_svc );\r
-\r
-void\r
-spl_qp_svc_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec );\r
-\r
-void\r
-spl_qp_alias_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec );\r
-\r
-void\r
-spl_qp_svc_reset(\r
- IN spl_qp_svc_t* p_spl_qp_svc );\r
-\r
-void\r
-spl_qp_svc_reset_cb(\r
- IN cl_async_proc_item_t* p_item );\r
-\r
-ib_api_status_t\r
-acquire_svc_disp(\r
- IN const cl_qmap_t* const p_svc_map,\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t *ph_mad_disp );\r
-\r
-void\r
-smi_poll_timer_cb(\r
- IN void* context );\r
-\r
-void\r
-smi_post_recvs(\r
- IN cl_list_item_t* const p_list_item,\r
- IN void* context );\r
-\r
-#if defined( CL_USE_MUTEX )\r
-void\r
-spl_qp_send_async_cb(\r
- IN cl_async_proc_item_t* p_item );\r
-\r
-void\r
-spl_qp_recv_async_cb(\r
- IN cl_async_proc_item_t* p_item );\r
-#endif\r
-\r
/*
 * Create the special QP manager: the singleton (gp_spl_qp_mgr) that owns
 * the SMI poll timer, the SMI/GSI lookup maps, and the PnP registrations
 * through which per-port QP0/QP1 services are created and destroyed.
 */
ib_api_status_t
create_spl_qp_mgr(
	IN al_obj_t* const p_parent_obj )
{
	ib_pnp_req_t pnp_req;
	ib_api_status_t status;
	cl_status_t cl_status;

	AL_ENTER( AL_DBG_SMI );

	CL_ASSERT( p_parent_obj );
	CL_ASSERT( !gp_spl_qp_mgr );

	gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) );
	if( !gp_spl_qp_mgr )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("IB_INSUFFICIENT_MEMORY\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the special QP manager. */
	construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI );
	cl_timer_construct( &gp_spl_qp_mgr->poll_timer );

	/* Initialize the lists. */
	cl_qmap_init( &gp_spl_qp_mgr->smi_map );
	cl_qmap_init( &gp_spl_qp_mgr->gsi_map );

	/* Initialize the global SMI/GSI manager object. */
	status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE,
		destroying_spl_qp_mgr, NULL, free_spl_qp_mgr );
	if( status != IB_SUCCESS )
	{
		/* init_al_obj failed, so tear down via free_spl_qp_mgr directly
		 * rather than through pfn_destroy. */
		free_spl_qp_mgr( &gp_spl_qp_mgr->obj );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Attach the special QP manager to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj );
	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Initialize the SMI polling timer. */
	cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb,
		gp_spl_qp_mgr );
	if( cl_status != CL_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed, status 0x%x\n", cl_status ) );
		return ib_convert_cl_status( cl_status );
	}

	/*
	 * Note: PnP registrations for port events must be done
	 * when the special QP manager is created. This ensures that
	 * the registrations are listed sequentially and the reporting
	 * of PnP events occurs in the proper order.
	 */

	/*
	 * Separate context is needed for each special QP. Therefore, a
	 * separate PnP event registration is performed for QP0 and QP1.
	 */

	/* Register for port PnP events for QP0. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb = spl_qp0_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/* Reference the special QP manager on behalf of the ib_reg_pnp call. */
	ref_al_obj( &gp_spl_qp_mgr->obj );

	/* Register for port PnP events for QP1. */
	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = &gp_spl_qp_mgr->obj;
	pnp_req.pfn_pnp_cb = spl_qp1_agent_pnp_cb;

	status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp );

	if( status != IB_SUCCESS )
	{
		gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL );
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) );
		return status;
	}

	/*
	 * Note that we don't release the reference taken in init_al_obj
	 * because we need one on behalf of the ib_reg_pnp call.
	 */

	AL_EXIT( AL_DBG_SMI );
	return IB_SUCCESS;
}
-\r
-\r
-\r
-/*\r
- * Pre-destroy the special QP manager.\r
- */\r
-void\r
-destroying_spl_qp_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- ib_api_status_t status;\r
-\r
- CL_ASSERT( p_obj );\r
- CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
- UNUSED_PARAM( p_obj );\r
-\r
- /* Deregister for port PnP events for QP0. */\r
- if( gp_spl_qp_mgr->h_qp0_pnp )\r
- {\r
- status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- /* Deregister for port PnP events for QP1. */\r
- if( gp_spl_qp_mgr->h_qp1_pnp )\r
- {\r
- status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- /* Destroy the SMI polling timer. */\r
- cl_timer_destroy( &gp_spl_qp_mgr->poll_timer );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free the special QP manager.\r
- */\r
-void\r
-free_spl_qp_mgr(\r
- IN al_obj_t* p_obj )\r
-{\r
- CL_ASSERT( p_obj );\r
- CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) );\r
- UNUSED_PARAM( p_obj );\r
-\r
- destroy_al_obj( &gp_spl_qp_mgr->obj );\r
- cl_free( gp_spl_qp_mgr );\r
- gp_spl_qp_mgr = NULL;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP0 agent PnP event callback.\r
- */\r
-ib_api_status_t\r
-spl_qp0_agent_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec )\r
-{\r
- ib_api_status_t status;\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP1 agent PnP event callback.\r
- */\r
-ib_api_status_t\r
-spl_qp1_agent_pnp_cb(\r
- IN ib_pnp_rec_t* p_pnp_rec )\r
-{\r
- ib_api_status_t status;\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP agent PnP event callback.\r
- */\r
-ib_api_status_t\r
-spl_qp_agent_pnp(\r
- IN ib_pnp_rec_t* p_pnp_rec,\r
- IN ib_qp_type_t qp_type )\r
-{\r
- ib_api_status_t status;\r
- al_obj_t* p_obj;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_pnp_rec );\r
- p_obj = p_pnp_rec->context;\r
-\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI,\r
- ("p_pnp_rec->pnp_event = 0x%x (%s)\n",\r
- p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );\r
- /* Dispatch based on the PnP event type. */\r
- switch( p_pnp_rec->pnp_event )\r
- {\r
- case IB_PNP_PORT_ADD:\r
- CL_ASSERT( !p_obj );\r
- status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type );\r
- break;\r
-\r
- case IB_PNP_PORT_REMOVE:\r
- CL_ASSERT( p_obj );\r
- ref_al_obj( p_obj );\r
- p_obj->pfn_destroy( p_obj, NULL );\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- case IB_PNP_LID_CHANGE:\r
- CL_ASSERT( p_obj );\r
- spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec );\r
- status = IB_SUCCESS;\r
- break;\r
-\r
- default:\r
- /* All other events are ignored. */\r
- status = IB_SUCCESS;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Create a special QP service.\r
- */\r
-ib_api_status_t\r
-create_spl_qp_svc(\r
- IN ib_pnp_port_rec_t* p_pnp_rec,\r
- IN const ib_qp_type_t qp_type )\r
-{\r
- cl_status_t cl_status;\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- ib_ca_handle_t h_ca;\r
- ib_cq_create_t cq_create;\r
- ib_qp_create_t qp_create;\r
- ib_qp_attr_t qp_attr;\r
- ib_mad_svc_t mad_svc;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_pnp_rec );\r
-\r
- if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
- return IB_INVALID_PARAMETER;\r
- }\r
-\r
- CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context );\r
- CL_ASSERT( p_pnp_rec->p_ca_attr );\r
- CL_ASSERT( p_pnp_rec->p_port_attr );\r
-\r
- p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) );\r
- if( !p_spl_qp_svc )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("IB_INSUFFICIENT_MEMORY\n") );\r
- return IB_INSUFFICIENT_MEMORY;\r
- }\r
-\r
- /* Tie the special QP service to the port by setting the port number. */\r
- p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num;\r
- /* Store the port GUID to allow faster lookups of the dispatchers. */\r
- p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid;\r
-\r
- /* Initialize the send and receive queues. */\r
- cl_qlist_init( &p_spl_qp_svc->send_queue );\r
- cl_qlist_init( &p_spl_qp_svc->recv_queue );\r
- cl_spinlock_init(&p_spl_qp_svc->cache_lock);\r
- \r
-#if defined( CL_USE_MUTEX )\r
- /* Initialize async callbacks and flags for send/receive processing. */\r
- p_spl_qp_svc->send_async_queued = FALSE;\r
- p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb;\r
- p_spl_qp_svc->recv_async_queued = FALSE;\r
- p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb;\r
-#endif\r
-\r
- /* Initialize the async callback function to process local sends. */\r
- p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb;\r
-\r
- /* Initialize the async callback function to reset the QP on error. */\r
- p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb;\r
-\r
- /* Construct the special QP service object. */\r
- construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI );\r
-\r
- /* Initialize the special QP service object. */\r
- status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE,\r
- destroying_spl_qp_svc, NULL, free_spl_qp_svc );\r
- if( status != IB_SUCCESS )\r
- {\r
- free_spl_qp_svc( &p_spl_qp_svc->obj );\r
- return status;\r
- }\r
-\r
- /* Attach the special QP service to the parent object. */\r
- status = attach_al_obj(\r
- (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
- return status;\r
- }\r
-\r
- h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );\r
- CL_ASSERT( h_ca );\r
- if( !h_ca )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );\r
- return IB_INVALID_GUID;\r
- }\r
-\r
- p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;\r
-\r
- /* Determine the maximum queue depth of the QP and CQs. */\r
- p_spl_qp_svc->max_qp_depth =\r
- ( p_pnp_rec->p_ca_attr->max_wrs <\r
- p_pnp_rec->p_ca_attr->max_cqes ) ?\r
- p_pnp_rec->p_ca_attr->max_wrs :\r
- p_pnp_rec->p_ca_attr->max_cqes;\r
-\r
- /* Compare this maximum to the default special queue depth. */\r
- if( ( qp_type == IB_QPT_QP0 ) &&\r
- ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) )\r
- p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH;\r
- if( ( qp_type == IB_QPT_QP1 ) &&\r
- ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) )\r
- p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH;\r
-\r
- /* Create the send CQ. */\r
- cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
- cq_create.size = p_spl_qp_svc->max_qp_depth;\r
- cq_create.pfn_comp_cb = spl_qp_send_comp_cb;\r
-\r
- status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
- p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Reference the special QP service on behalf of ib_create_cq. */\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- /* Check the result of the creation request. */\r
- if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_create_cq allocated insufficient send CQ size\n") );\r
- return IB_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- /* Create the receive CQ. */\r
- cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );\r
- cq_create.size = p_spl_qp_svc->max_qp_depth;\r
- cq_create.pfn_comp_cb = spl_qp_recv_comp_cb;\r
-\r
- status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create,\r
- p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Reference the special QP service on behalf of ib_create_cq. */\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- /* Check the result of the creation request. */\r
- if( cq_create.size < p_spl_qp_svc->max_qp_depth )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_create_cq allocated insufficient recv CQ size\n") );\r
- return IB_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- /* Create the special QP. */\r
- cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
- qp_create.qp_type = qp_type;\r
- qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
- qp_create.rq_depth = p_spl_qp_svc->max_qp_depth;\r
- qp_create.sq_sge = 3; /* Three entries are required for segmentation. */\r
- qp_create.rq_sge = 1;\r
- qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq;\r
- qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq;\r
- qp_create.sq_signaled = TRUE;\r
-\r
- status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd,\r
- p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
- p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- /* Check the result of the creation request. */\r
- status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
- ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) ||\r
- ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_spl_qp allocated attributes are insufficient\n") );\r
- return IB_INSUFFICIENT_RESOURCES;\r
- }\r
-\r
- /* Initialize the QP for use. */\r
- status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Post receive buffers. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- status = spl_qp_svc_post_recvs( p_spl_qp_svc );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("spl_qp_svc_post_recvs failed, %s\n",\r
- ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Create the MAD dispatcher. */\r
- status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp,\r
- &p_spl_qp_svc->h_mad_disp );\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /*\r
- * Add this service to the special QP manager lookup lists.\r
- * The service must be added to allow the creation of a QP alias.\r
- */\r
- cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
- if( qp_type == IB_QPT_QP0 )\r
- {\r
- cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid,\r
- &p_spl_qp_svc->map_item );\r
- }\r
- else\r
- {\r
- cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid,\r
- &p_spl_qp_svc->map_item );\r
- }\r
- cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
-\r
- /*\r
- * If the CA does not support HW agents, create a QP alias and register\r
- * a MAD service for sending responses from the local MAD interface.\r
- */\r
- if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
- {\r
- /* Create a QP alias. */\r
- cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );\r
- qp_create.qp_type =\r
- ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS;\r
- qp_create.sq_depth = p_spl_qp_svc->max_qp_depth;\r
- qp_create.sq_sge = 1;\r
- qp_create.sq_signaled = TRUE;\r
-\r
- status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias,\r
- p_pnp_rec->p_port_attr->port_guid, &qp_create,\r
- p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key,\r
- &p_spl_qp_svc->h_qp_alias );\r
-\r
- if (status != IB_SUCCESS)\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_get_spl_qp alias failed, %s\n",\r
- ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
-\r
- /* Reference the special QP service on behalf of ib_get_spl_qp. */\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- /* Register a MAD service for sends. */\r
- cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );\r
- mad_svc.mad_svc_context = p_spl_qp_svc;\r
- mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb;\r
- mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb;\r
-\r
- status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc,\r
- &p_spl_qp_svc->h_mad_svc );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) );\r
- return status;\r
- }\r
- }\r
-\r
- /* Set the context of the PnP event to this child object. */\r
- p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj;\r
-\r
- /* The QP is ready. Change the state. */\r
- p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
-\r
- /* Force a completion callback to rearm the CQs. */\r
- spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc );\r
- spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc );\r
-\r
- /* Start the polling thread timer. */\r
- if( g_smi_poll_interval )\r
- {\r
- cl_status =\r
- cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
-\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL );\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("cl_timer_start failed, status 0x%x\n", cl_status ) );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
- }\r
-\r
- /* Release the reference taken in init_al_obj. */\r
- deref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Return a work completion to the MAD dispatcher for the specified MAD.\r
- */\r
-static void\r
-__complete_send_mad(\r
- IN const al_mad_disp_handle_t h_mad_disp,\r
- IN al_mad_wr_t* const p_mad_wr,\r
- IN const ib_wc_status_t wc_status )\r
-{\r
- ib_wc_t wc;\r
-\r
- /* Construct a send work completion. */\r
- cl_memclr( &wc, sizeof( ib_wc_t ) );\r
- wc.wr_id = p_mad_wr->send_wr.wr_id;\r
- wc.wc_type = IB_WC_SEND;\r
- wc.status = wc_status;\r
-\r
- /* Set the send size if we were successful with the send. */\r
- if( wc_status == IB_WCS_SUCCESS )\r
- wc.length = MAD_BLOCK_SIZE;\r
-\r
- mad_disp_send_done( h_mad_disp, p_mad_wr, &wc );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Pre-destroy a special QP service.\r
- */\r
-void\r
-destroying_spl_qp_svc(\r
- IN al_obj_t* p_obj )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- cl_list_item_t* p_list_item;\r
- al_mad_wr_t* p_mad_wr;\r
-\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_obj );\r
- p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
-\r
- /* Change the state to prevent processing new send requests. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- p_spl_qp_svc->state = SPL_QP_DESTROYING;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Wait here until the special QP service is no longer in use. */\r
- while( p_spl_qp_svc->in_use_cnt )\r
- {\r
- cl_thread_suspend( 0 );\r
- }\r
-\r
- /* Destroy the special QP. */\r
- if( p_spl_qp_svc->h_qp )\r
- {\r
- /* If present, remove the special QP service from the tracking map. */\r
- cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
- if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 )\r
- {\r
- cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid );\r
- }\r
- else\r
- {\r
- cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid );\r
- }\r
- cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
-\r
- status = ib_destroy_qp( p_spl_qp_svc->h_qp,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Complete any outstanding MAD sends operations as "flushed". */\r
- for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue );\r
- p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue );\r
- p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) )\r
- {\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_WR_FLUSHED_ERR );\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- }\r
-\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- /* Receive MAD elements are returned to the pool by the free routine. */\r
- }\r
-\r
- /* Destroy the special QP alias and CQs. */\r
- if( p_spl_qp_svc->h_qp_alias )\r
- {\r
- status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
- if( p_spl_qp_svc->h_send_cq )\r
- {\r
- status = ib_destroy_cq( p_spl_qp_svc->h_send_cq,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
- if( p_spl_qp_svc->h_recv_cq )\r
- {\r
- status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq,\r
- (ib_pfn_destroy_cb_t)deref_al_obj );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Free a special QP service.\r
- */\r
-void\r
-free_spl_qp_svc(\r
- IN al_obj_t* p_obj )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- cl_list_item_t* p_list_item;\r
- al_mad_element_t* p_al_mad;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_obj );\r
- p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
-\r
- /* Dereference the CA. */\r
- if( p_spl_qp_svc->obj.p_ci_ca )\r
- deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj );\r
-\r
- /* Return receive MAD elements to the pool. */\r
- for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
- p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
- p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
- {\r
- p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
-\r
- status = ib_put_mad( &p_al_mad->element );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) );\r
-\r
- destroy_al_obj( &p_spl_qp_svc->obj );\r
- cl_free( p_spl_qp_svc );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Update the base LID of a special QP service.\r
- */\r
-void\r
-spl_qp_svc_lid_change(\r
- IN al_obj_t* p_obj,\r
- IN ib_pnp_port_rec_t* p_pnp_rec )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_obj );\r
- CL_ASSERT( p_pnp_rec );\r
- CL_ASSERT( p_pnp_rec->p_port_attr );\r
-\r
- p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
-\r
- p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid;\r
- p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc;\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Route a send work request.\r
- */\r
-mad_route_t\r
-route_mad_send(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_send_wr_t* const p_send_wr )\r
-{\r
- al_mad_wr_t* p_mad_wr;\r
- al_mad_send_t* p_mad_send;\r
- ib_mad_t* p_mad;\r
- ib_smp_t* p_smp;\r
- ib_av_handle_t h_av;\r
- mad_route_t route;\r
- boolean_t local, loopback, discard;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_send_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and the MAD. */\r
- p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
- p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
- p_smp = (ib_smp_t*)p_mad;\r
-\r
- /* Check if the CA has a local MAD interface. */\r
- local = loopback = discard = FALSE;\r
- if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
- {\r
- /*\r
- * If the MAD is a locally addressed Subnet Management, Performance\r
- * Management, or Connection Management datagram, process the work\r
- * request locally.\r
- */\r
- h_av = p_send_wr->dgrm.ud.h_av;\r
- switch( p_mad->mgmt_class )\r
- {\r
- case IB_MCLASS_SUBN_DIR:\r
- /* Perform special checks on directed route SMPs. */\r
- if( ib_smp_is_response( p_smp ) )\r
- {\r
- /*\r
- * This node is the originator of the response. Discard\r
- * if the hop count or pointer is zero, an intermediate hop,\r
- * out of bounds hop, or if the first port of the directed\r
- * route retrun path is not this port.\r
- */\r
- if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("hop cnt or hop ptr set to 0...discarding\n") );\r
- discard = TRUE;\r
- }\r
- else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("hop cnt != (hop ptr - 1)...discarding\n") );\r
- discard = TRUE;\r
- }\r
- else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("hop cnt > max hops...discarding\n") );\r
- discard = TRUE;\r
- }\r
- else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) &&\r
- ( p_smp->return_path[ p_smp->hop_ptr - 1 ] !=\r
- p_spl_qp_svc->port_num ) )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("return path[hop ptr - 1] != port num...discarding\n") );\r
- discard = TRUE;\r
- }\r
- }\r
- else\r
- {\r
- /* The SMP is a request. */\r
- if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
- ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
- {\r
- discard = TRUE;\r
- }\r
- else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) )\r
- {\r
- /* Self Addressed: Sent locally, routed locally. */\r
- local = TRUE;\r
- discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) ||\r
- ( p_smp->dr_dlid != IB_LID_PERMISSIVE );\r
- }\r
- else if( ( p_smp->hop_count != 0 ) &&\r
- ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) )\r
- {\r
- /* End of Path: Sent remotely, routed locally. */\r
- local = TRUE;\r
- }\r
- else if( ( p_smp->hop_count != 0 ) &&\r
- ( p_smp->hop_ptr == 0 ) )\r
- {\r
- /* Beginning of Path: Sent locally, routed remotely. */\r
- if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
- {\r
- discard =\r
- ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] !=\r
- p_spl_qp_svc->port_num );\r
- }\r
- }\r
- else\r
- {\r
- /* Intermediate hop. */\r
- discard = TRUE;\r
- }\r
- }\r
- /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
- loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
- break;\r
-\r
- case IB_MCLASS_SUBN_LID:\r
- /* Loopback locally addressed SM to SM "heartbeat" messages. */\r
- loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO);\r
-\r
- /* Fall through to check for a local MAD. */\r
-\r
- case IB_MCLASS_PERF:\r
- case IB_MCLASS_BM:\r
- local = ( h_av &&\r
- ( h_av->av_attr.dlid ==\r
- ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
- break;\r
-\r
- default:\r
- /* Route vendor specific MADs to the HCA provider. */\r
- if( ib_class_is_vendor_specific( p_mad->mgmt_class ) )\r
- {\r
- local = ( h_av &&\r
- ( h_av->av_attr.dlid ==\r
- ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) );\r
- }\r
- break;\r
- }\r
- }\r
-\r
- route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ?\r
- ROUTE_LOCAL : ROUTE_REMOTE;\r
- if( local ) route = ROUTE_LOCAL;\r
- if( loopback && local ) route = ROUTE_LOOPBACK;\r
- if( discard ) route = ROUTE_DISCARD;\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return route;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Send a work request on the special QP.\r
- */\r
-ib_api_status_t\r
-spl_qp_svc_send(\r
- IN const ib_qp_handle_t h_qp,\r
- IN ib_send_wr_t* const p_send_wr )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- al_mad_wr_t* p_mad_wr;\r
- mad_route_t route;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( h_qp );\r
- CL_ASSERT( p_send_wr );\r
-\r
- /* Get the special QP service. */\r
- p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context;\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_spl_qp_svc->h_qp == h_qp );\r
-\r
- /* Determine how to route the MAD. */\r
- route = route_mad_send( p_spl_qp_svc, p_send_wr );\r
-\r
- /*\r
- * Check the QP state and guard against error handling. Also,\r
- * to maintain proper order of work completions, delay processing\r
- * a local MAD until any remote MAD work requests have completed,\r
- * and delay processing a remote MAD until local MAD work requests\r
- * have completed.\r
- */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr ||\r
- (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) ||\r
- ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >=\r
- p_spl_qp_svc->max_qp_depth ) )\r
- {\r
- /*\r
- * Return busy status.\r
- * The special QP will resume sends at this point.\r
- */\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_RESOURCE_BUSY;\r
- }\r
-\r
- p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr );\r
-\r
- if( is_local( route ) )\r
- {\r
- /* Save the local MAD work request for processing. */\r
- p_spl_qp_svc->local_mad_wr = p_mad_wr;\r
-\r
- /* Flag the service as in use by the asynchronous processing thread. */\r
- cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
-\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- status = local_mad_send( p_spl_qp_svc, p_mad_wr );\r
- }\r
- else\r
- {\r
- /* Process a remote MAD send work request. */\r
- status = remote_mad_send( p_spl_qp_svc, p_mad_wr );\r
-\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process a remote MAD send work request. Called holding the spl_qp_svc lock.\r
- */\r
-ib_api_status_t\r
-remote_mad_send(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_smp_t* p_smp;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- /* Perform outbound MAD processing. */\r
-\r
- /* Adjust directed route SMPs as required by IBA. */\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- {\r
- if( ib_smp_is_response( p_smp ) )\r
- {\r
- if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
- p_smp->hop_ptr--;\r
- }\r
- else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
- {\r
- /*\r
- * Only update the pointer if the hw_agent is not implemented.\r
- * Fujitsu implements SMI in hardware, so the following has to\r
- * be passed down to the hardware SMI.\r
- */\r
- ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
- if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents )\r
- p_smp->hop_ptr++;\r
- ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
- }\r
- }\r
-\r
- /* Always generate send completions. */\r
- p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED;\r
-\r
- /* Queue the MAD work request on the service tracking queue. */\r
- cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
-\r
- status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item );\r
-\r
- /* Reset directed route SMPs as required by IBA. */\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- {\r
- if( ib_smp_is_response( p_smp ) )\r
- {\r
- if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
- p_smp->hop_ptr++;\r
- }\r
- else if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
- {\r
- /* Only update if the hw_agent is not implemented. */\r
- ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
- if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE )\r
- p_smp->hop_ptr--;\r
- ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca );\r
- }\r
- }\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Handle a MAD destined for the local CA, using cached data\r
- * as much as possible.\r
- */\r
-static ib_api_status_t\r
-local_mad_send(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- mad_route_t route;\r
- ib_api_status_t status = IB_SUCCESS;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Determine how to route the MAD. */\r
- route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr );\r
-\r
- /* Check if this MAD should be discarded. */\r
- if( is_discard( route ) )\r
- {\r
- /* Deliver a "work completion" to the dispatcher. */\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- status = IB_INVALID_SETTING;\r
- }\r
- else if( is_loopback( route ) )\r
- {\r
- /* Loopback local SM to SM "heartbeat" messages. */\r
- status = loopback_mad( p_spl_qp_svc, p_mad_wr );\r
- }\r
- else\r
- {\r
- switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class )\r
- {\r
- case IB_MCLASS_SUBN_DIR:\r
- case IB_MCLASS_SUBN_LID:\r
- //DO not use the cache in order to force Mkey check\r
- status = __process_subn_mad( p_spl_qp_svc, p_mad_wr );\r
- //status = IB_NOT_DONE;\r
- break;\r
-\r
- default:\r
- status = IB_NOT_DONE;\r
- }\r
- }\r
-\r
- if( status == IB_NOT_DONE )\r
- {\r
- /* Queue an asynchronous processing item to process the local MAD. */\r
- cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async );\r
- }\r
- else\r
- {\r
- /*\r
- * Clear the local MAD pointer to allow processing of other MADs.\r
- * This is done after polling for attribute changes to ensure that\r
- * subsequent MADs pick up any changes performed by this one.\r
- */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- p_spl_qp_svc->local_mad_wr = NULL;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* No longer in use by the asynchronous processing thread. */\r
- cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
-\r
- /* Special QP operations will resume by unwinding. */\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-get_resp_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr,\r
- OUT ib_mad_element_t** const pp_mad_resp )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
- CL_ASSERT( pp_mad_resp );\r
-\r
- /* Get a MAD element from the pool for the response. */\r
- status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key,\r
- MAD_BLOCK_SIZE, pp_mad_resp );\r
- if( status != IB_SUCCESS )\r
- {\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-complete_local_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr,\r
- IN ib_mad_element_t* const p_mad_resp )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
- CL_ASSERT( p_mad_resp );\r
-\r
- /* Construct the receive MAD element. */\r
- p_mad_resp->status = IB_WCS_SUCCESS;\r
- p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
- p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;\r
- if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
- {\r
- p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
- p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
- }\r
-\r
- /*\r
- * Hand the receive MAD element to the dispatcher before completing\r
- * the send. This guarantees that the send request cannot time out.\r
- */\r
- status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
-\r
- /* Forward the send work completion to the dispatcher. */\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-loopback_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- /* Simulate a send/receive between local managers. */\r
- cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
-\r
- /* Construct the receive MAD element. */\r
- p_mad_resp->status = IB_WCS_SUCCESS;\r
- p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
- p_mad_resp->remote_lid = p_spl_qp_svc->base_lid;\r
- if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
- {\r
- p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data;\r
- p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
- }\r
-\r
- /*\r
- * Hand the receive MAD element to the dispatcher before completing\r
- * the send. This guarantees that the send request cannot time out.\r
- */\r
- status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp );\r
-\r
- /* Forward the send work completion to the dispatcher. */\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS );\r
-\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static void\r
-__update_guid_info(\r
- IN spl_qp_cache_t* const p_cache,\r
- IN const ib_smp_t* const p_mad )\r
-{\r
- uint32_t idx;\r
-\r
- /* Get the table selector from the attribute */\r
- idx = cl_ntoh32( p_mad->attr_mod );\r
-\r
- /*\r
- * We only get successful MADs here, so invalid settings\r
- * shouldn't happen.\r
- */\r
- CL_ASSERT( idx <= 31 );\r
-\r
- cl_memcpy( &p_cache->guid_block[idx].tbl,\r
- ib_smp_get_payload_ptr( p_mad ),\r
- sizeof(ib_guid_info_t) );\r
- p_cache->guid_block[idx].valid = TRUE;\r
-}\r
-\r
-\r
-static void\r
-__update_pkey_table(\r
- IN spl_qp_cache_t* const p_cache,\r
- IN const ib_smp_t* const p_mad )\r
-{\r
- uint16_t idx;\r
-\r
- /* Get the table selector from the attribute */\r
- idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
-\r
- CL_ASSERT( idx <= 2047 );\r
-\r
- cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
- ib_smp_get_payload_ptr( p_mad ),\r
- sizeof(ib_pkey_table_t) );\r
- p_cache->pkey_tbl[idx].valid = TRUE;\r
-}\r
-\r
-\r
-static void\r
-__update_sl_vl_table(\r
- IN spl_qp_cache_t* const p_cache,\r
- IN const ib_smp_t* const p_mad )\r
-{\r
- cl_memcpy( &p_cache->sl_vl.tbl,\r
- ib_smp_get_payload_ptr( p_mad ),\r
- sizeof(ib_slvl_table_t) );\r
- p_cache->sl_vl.valid = TRUE;\r
-}\r
-\r
-\r
-static void\r
-__update_vl_arb_table(\r
- IN spl_qp_cache_t* const p_cache,\r
- IN const ib_smp_t* const p_mad )\r
-{\r
- uint16_t idx;\r
-\r
- /* Get the table selector from the attribute */\r
- idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
-\r
- CL_ASSERT( idx <= 3 );\r
-\r
- cl_memcpy( &p_cache->vl_arb[idx].tbl,\r
- ib_smp_get_payload_ptr( p_mad ),\r
- sizeof(ib_vl_arb_table_t) );\r
- p_cache->vl_arb[idx].valid = TRUE;\r
-}\r
-\r
-\r
-\r
-void\r
-spl_qp_svc_update_cache(\r
- IN spl_qp_svc_t *p_spl_qp_svc,\r
- IN ib_smp_t *p_mad )\r
-{\r
-\r
-\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad );\r
- CL_ASSERT( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
- p_mad->mgmt_class == IB_MCLASS_SUBN_LID);\r
- CL_ASSERT(!p_mad->status);\r
-\r
- cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
- \r
- switch( p_mad->attr_id )\r
- {\r
- case IB_MAD_ATTR_GUID_INFO:\r
- __update_guid_info(\r
- &p_spl_qp_svc->cache, p_mad );\r
- break;\r
-\r
- case IB_MAD_ATTR_P_KEY_TABLE:\r
- __update_pkey_table(\r
- &p_spl_qp_svc->cache, p_mad );\r
- break;\r
-\r
- case IB_MAD_ATTR_SLVL_TABLE:\r
- __update_sl_vl_table(\r
- &p_spl_qp_svc->cache, p_mad );\r
- break;\r
-\r
- case IB_MAD_ATTR_VL_ARBITRATION:\r
- __update_vl_arb_table(\r
- &p_spl_qp_svc->cache, p_mad );\r
- break;\r
-\r
- default:\r
- break;\r
- }\r
- \r
- cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
-}\r
-\r
-\r
-\r
-static ib_api_status_t\r
-__process_node_info(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_smp_t *p_smp;\r
- ib_node_info_t *p_node_info;\r
- ib_ca_attr_t *p_ca_attr;\r
- ib_port_attr_t *p_port_attr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
- if( p_mad->method != IB_MAD_METHOD_GET )\r
- {\r
- /* Node description is a GET-only attribute. */\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
- cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
- p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_smp->status = IB_SMP_DIRECTION;\r
- else\r
- p_smp->status = 0;\r
-\r
- p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );\r
-\r
- /*\r
- * Fill in the node info, protecting against the\r
- * attributes being changed by PnP.\r
- */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
-\r
- p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr;\r
- p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1];\r
-\r
- p_node_info->base_version = 1;\r
- p_node_info->class_version = 1;\r
- p_node_info->node_type = IB_NODE_TYPE_CA;\r
- p_node_info->num_ports = p_ca_attr->num_ports;\r
- p_node_info->sys_guid = p_ca_attr->system_image_guid;\r
- p_node_info->node_guid = p_ca_attr->ca_guid;\r
- p_node_info->port_guid = p_port_attr->port_guid;\r
- p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys );\r
- p_node_info->device_id = cl_hton16( p_ca_attr->dev_id );\r
- p_node_info->revision = cl_hton32( p_ca_attr->revision );\r
- p_node_info->port_num_vendor_id =\r
- cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__process_node_desc(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
- if( p_mad->method != IB_MAD_METHOD_GET )\r
- {\r
- /* Node info is a GET-only attribute. */\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_INVALID_SETTING;\r
- }\r
-\r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE );\r
- p_mad_resp->p_mad_buf->method =\r
- (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION;\r
- else\r
- p_mad_resp->p_mad_buf->status = 0;\r
- /* Set the node description to the machine name. */\r
- cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, \r
- node_desc, sizeof(node_desc) );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-static ib_api_status_t\r
-__process_guid_info(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- \r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_smp_t *p_smp;\r
- ib_guid_info_t *p_guid_info;\r
- uint16_t idx;\r
- ib_api_status_t status;\r
-\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- /* Get the table selector from the attribute */\r
- idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
- \r
- /*\r
- * TODO : Setup the response to fail the MAD instead of sending\r
- * it down to the HCA.\r
- */\r
- if( idx > 31 )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
- if( !p_spl_qp_svc->cache.guid_block[idx].valid )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- /*\r
- * If a SET, see if the set is identical to the cache,\r
- * in which case it's a no-op.\r
- */\r
- if( p_mad->method == IB_MAD_METHOD_SET )\r
- {\r
- if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )\r
- {\r
- /* The set is requesting a change. */\r
- return IB_NOT_DONE;\r
- }\r
- }\r
- \r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
-\r
- /* Setup the response mad. */\r
- cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
- p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_smp->status = IB_SMP_DIRECTION;\r
- else\r
- p_smp->status = 0;\r
-\r
- p_guid_info = (ib_guid_info_t*)ib_smp_get_payload_ptr( p_smp );\r
-\r
- // TODO: do we need lock on the cache ?????\r
-\r
- \r
- /* Copy the cached data. */\r
- cl_memcpy( p_guid_info,\r
- &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__process_pkey_table(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
-\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_smp_t *p_smp;\r
- ib_pkey_table_t *p_pkey_table;\r
- uint16_t idx;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- /* Get the table selector from the attribute */\r
- idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod ));\r
- \r
- /*\r
- * TODO : Setup the response to fail the MAD instead of sending\r
- * it down to the HCA.\r
- */\r
- if( idx > 2047 )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
-\r
- if( !p_spl_qp_svc->cache.pkey_tbl[idx].valid )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- /*\r
- * If a SET, see if the set is identical to the cache,\r
- * in which case it's a no-op.\r
- */\r
- if( p_mad->method == IB_MAD_METHOD_SET )\r
- {\r
- if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) )\r
- {\r
- /* The set is requesting a change. */\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
- }\r
- \r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
-\r
- /* Setup the response mad. */\r
- cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
- p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_smp->status = IB_SMP_DIRECTION;\r
- else\r
- p_smp->status = 0;\r
-\r
- p_pkey_table = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp );\r
-\r
- // TODO: do we need lock on the cache ?????\r
-\r
- \r
- /* Copy the cached data. */\r
- cl_memcpy( p_pkey_table,\r
- &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-__process_slvl_table(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
-\r
-\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_smp_t *p_smp;\r
- ib_slvl_table_t *p_slvl_table;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- if( !p_spl_qp_svc->cache.sl_vl.valid )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- /*\r
- * If a SET, see if the set is identical to the cache,\r
- * in which case it's a no-op.\r
- */\r
- if( p_mad->method == IB_MAD_METHOD_SET )\r
- {\r
- if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ) )\r
- {\r
- /* The set is requesting a change. */\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
- }\r
- \r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
-\r
- /* Setup the response mad. */\r
- cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
- p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_smp->status = IB_SMP_DIRECTION;\r
- else\r
- p_smp->status = 0;\r
-\r
- p_slvl_table = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp );\r
-\r
- // TODO: do we need lock on the cache ?????\r
-\r
- \r
- /* Copy the cached data. */\r
- cl_memcpy( p_slvl_table,\r
- &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-static ib_api_status_t\r
-__process_vl_arb_table(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
-\r
- ib_mad_t *p_mad;\r
- ib_mad_element_t *p_mad_resp;\r
- ib_smp_t *p_smp;\r
- ib_vl_arb_table_t *p_vl_arb_table;\r
- uint16_t idx;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- /* Get the table selector from the attribute */\r
- idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1;\r
- \r
- /*\r
- * TODO : Setup the response to fail the MAD instead of sending\r
- * it down to the HCA.\r
- */\r
- if( idx > 3 )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
-\r
- if( !p_spl_qp_svc->cache.vl_arb[idx].valid )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- /*\r
- * If a SET, see if the set is identical to the cache,\r
- * in which case it's a no-op.\r
- */\r
- if( p_mad->method == IB_MAD_METHOD_SET )\r
- {\r
- if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) )\r
- {\r
- /* The set is requesting a change. */\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
- }\r
- \r
- /* Get a MAD element from the pool for the response. */\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp );\r
- if( status == IB_SUCCESS )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf;\r
-\r
- /* Setup the response mad. */\r
- cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE );\r
- p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET);\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- p_smp->status = IB_SMP_DIRECTION;\r
- else\r
- p_smp->status = 0;\r
-\r
- p_vl_arb_table = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp );\r
-\r
- // TODO: do we need lock on the cache ?????\r
-\r
- \r
- /* Copy the cached data. */\r
- cl_memcpy( p_vl_arb_table,\r
- &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_vl_arb_table_t) );\r
-\r
- status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-\r
-/*\r
- * Process subnet administration MADs using cached data if possible.\r
- */\r
-static ib_api_status_t\r
-__process_subn_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_api_status_t status;\r
- ib_smp_t *p_smp;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
-\r
- CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
- p_smp->mgmt_class == IB_MCLASS_SUBN_LID );\r
-\r
- /* simple m-key check */\r
- if( p_spl_qp_svc->m_key && p_smp->m_key == p_spl_qp_svc->m_key )\r
- {\r
- if(!p_spl_qp_svc->cache_en )\r
- {\r
- p_spl_qp_svc->cache_en = TRUE;\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
- }\r
- else\r
- {\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check failed \n"));\r
- AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check SMP= 0x%08x:%08x SVC = 0x%08x:%08x \n",\r
- ((uint32_t*)&p_smp->m_key)[0],((uint32_t*)&p_smp->m_key)[1],\r
- ((uint32_t*)&p_spl_qp_svc->m_key)[0],((uint32_t*)&p_spl_qp_svc->m_key)[1]));\r
-\r
- p_spl_qp_svc->cache_en = FALSE;\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_NOT_DONE;\r
- }\r
-\r
- cl_spinlock_acquire(&p_spl_qp_svc->cache_lock);\r
- \r
- switch( p_smp->attr_id )\r
- {\r
- case IB_MAD_ATTR_NODE_INFO:\r
- status = __process_node_info( p_spl_qp_svc, p_mad_wr );\r
- break;\r
-\r
- case IB_MAD_ATTR_NODE_DESC:\r
- status = __process_node_desc( p_spl_qp_svc, p_mad_wr );\r
- break;\r
-\r
- case IB_MAD_ATTR_GUID_INFO:\r
- status = __process_guid_info( p_spl_qp_svc, p_mad_wr );\r
- break;\r
-\r
- case IB_MAD_ATTR_P_KEY_TABLE:\r
- status = __process_pkey_table( p_spl_qp_svc, p_mad_wr );\r
- break;\r
- \r
- case IB_MAD_ATTR_SLVL_TABLE:\r
- status = __process_slvl_table( p_spl_qp_svc, p_mad_wr );\r
- break;\r
- \r
- case IB_MAD_ATTR_VL_ARBITRATION:\r
- status = __process_vl_arb_table( p_spl_qp_svc, p_mad_wr );\r
- break;\r
- \r
- default:\r
- status = IB_NOT_DONE;\r
- break;\r
- }\r
-\r
- cl_spinlock_release(&p_spl_qp_svc->cache_lock);\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Process a local MAD send work request.\r
- */\r
-static ib_api_status_t\r
-fwd_local_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN al_mad_wr_t* const p_mad_wr )\r
-{\r
- ib_mad_t* p_mad;\r
- ib_smp_t* p_smp;\r
- al_mad_send_t* p_mad_send;\r
- ib_mad_element_t* p_mad_response = NULL;\r
- ib_mad_t* p_mad_response_buf;\r
- ib_api_status_t status = IB_SUCCESS;\r
- boolean_t smp_is_set;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_wr );\r
-\r
- /* Initialize a pointers to the MAD work request and outbound MAD. */\r
- p_mad = get_mad_hdr_from_wr( p_mad_wr );\r
- p_smp = (ib_smp_t*)p_mad;\r
-\r
- smp_is_set = (p_smp->method == IB_MAD_METHOD_SET);\r
-\r
- /* Get a MAD element from the pool for the response. */\r
- p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );\r
- if( p_mad_send->p_send_mad->resp_expected )\r
- {\r
- status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response );\r
- if( status != IB_SUCCESS )\r
- {\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
- }\r
- p_mad_response_buf = p_mad_response->p_mad_buf;\r
- }\r
- else\r
- {\r
- p_mad_response_buf = NULL;\r
- }\r
-\r
- /* Adjust directed route SMPs as required by IBA. */\r
- if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- {\r
- CL_ASSERT( !ib_smp_is_response( p_smp ) );\r
-\r
- /*\r
- * If this was a self addressed, directed route SMP, increment\r
- * the hop pointer in the request before delivery as required\r
- * by IBA. Otherwise, adjustment for remote requests occurs\r
- * during inbound processing.\r
- */\r
- if( p_smp->hop_count == 0 )\r
- p_smp->hop_ptr++;\r
- }\r
-\r
- /* Forward the locally addressed MAD to the CA interface. */\r
- status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca,\r
- p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf );\r
-\r
- /* Reset directed route SMPs as required by IBA. */\r
- if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- {\r
- /*\r
- * If this was a self addressed, directed route SMP, decrement\r
- * the hop pointer in the response before delivery as required\r
- * by IBA. Otherwise, adjustment for remote responses occurs\r
- * during outbound processing.\r
- */\r
- if( p_smp->hop_count == 0 )\r
- {\r
- /* Adjust the request SMP. */\r
- p_smp->hop_ptr--;\r
-\r
- /* Adjust the response SMP. */\r
- if( p_mad_response_buf )\r
- {\r
- p_smp = (ib_smp_t*)p_mad_response_buf;\r
- p_smp->hop_ptr--;\r
- }\r
- }\r
- }\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- if( p_mad_response )\r
- ib_put_mad( p_mad_response );\r
-\r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,\r
- IB_WCS_LOCAL_OP_ERR );\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
- }\r
-\r
- /* Check the completion status of this simulated send. */\r
- if( p_mad_send->p_send_mad->resp_expected )\r
- {\r
- /*\r
- * The SMI is uses PnP polling to refresh the base_lid and lmc.\r
- * Polling takes time, so we update the values here to prevent\r
- * the failure of LID routed MADs sent immediately following this\r
- * assignment. Check the response to see if the port info was set.\r
- */\r
- if( smp_is_set )\r
- {\r
- ib_smp_t* p_smp_response = NULL;\r
-\r
- switch( p_mad_response_buf->mgmt_class )\r
- {\r
- case IB_MCLASS_SUBN_DIR:\r
- if( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) \r
- {\r
- p_smp_response = p_smp;\r
- //p_port_info =\r
- // (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp );\r
- }\r
- break;\r
-\r
- case IB_MCLASS_SUBN_LID:\r
- if( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS )\r
- {\r
- p_smp_response = (ib_smp_t*)p_mad_response_buf;\r
- //p_port_info =\r
- // (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf);\r
- }\r
- break;\r
-\r
- default:\r
- break;\r
- }\r
-\r
- if( p_smp_response )\r
- {\r
- switch( p_smp_response->attr_id )\r
- {\r
- case IB_MAD_ATTR_PORT_INFO:\r
- {\r
- ib_port_info_t *p_port_info =\r
- (ib_port_info_t*)ib_smp_get_payload_ptr(p_smp_response);\r
- p_spl_qp_svc->base_lid = p_port_info->base_lid;\r
- p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info );\r
- p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid;\r
- p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info );\r
-\r
- if(p_port_info->m_key)\r
- p_spl_qp_svc->m_key = p_port_info->m_key;\r
- if (p_port_info->subnet_timeout & 0x80)\r
- {\r
- AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,\r
- ("Client reregister event, setting sm_lid to 0.\n"));\r
- ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
- p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->\r
- p_port_attr[p_port_info->local_port_num - 1].sm_lid= 0;\r
- ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca);\r
- }\r
- }\r
- break;\r
- case IB_MAD_ATTR_P_KEY_TABLE:\r
- case IB_MAD_ATTR_GUID_INFO:\r
- case IB_MAD_ATTR_SLVL_TABLE:\r
- case IB_MAD_ATTR_VL_ARBITRATION:\r
- spl_qp_svc_update_cache( p_spl_qp_svc, p_smp_response);\r
- break;\r
- default :\r
- break;\r
- }\r
- }\r
- }\r
- \r
-\r
- /* Construct the receive MAD element. */\r
- p_mad_response->status = IB_WCS_SUCCESS;\r
- p_mad_response->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp;\r
- p_mad_response->remote_lid = p_spl_qp_svc->base_lid;\r
- if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE )\r
- {\r
- p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data;\r
- p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE;\r
- }\r
-\r
- /*\r
- * Hand the receive MAD element to the dispatcher before completing\r
- * the send. This guarantees that the send request cannot time out.\r
- */\r
- status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response );\r
- }\r
- \r
- __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr,IB_WCS_SUCCESS);\r
-\r
- \r
- \r
- /* If the SMP was a Get, no need to trigger a PnP poll. */\r
- if( status == IB_SUCCESS && !smp_is_set )\r
- status = IB_NOT_DONE;\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Asynchronous processing thread callback to send a local MAD.\r
- */\r
-void\r
-send_local_mad_cb(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_item );\r
- p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async );\r
-\r
- /* Process a local MAD send work request. */\r
- CL_ASSERT( p_spl_qp_svc->local_mad_wr );\r
- status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr );\r
-\r
- /*\r
- * If we successfully processed a local MAD, which could have changed\r
- * something (e.g. the LID) on the HCA. Scan for changes.\r
- */\r
- if( status == IB_SUCCESS )\r
- pnp_poll();\r
-\r
- /*\r
- * Clear the local MAD pointer to allow processing of other MADs.\r
- * This is done after polling for attribute changes to ensure that\r
- * subsequent MADs pick up any changes performed by this one.\r
- */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- p_spl_qp_svc->local_mad_wr = NULL;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Continue processing any queued MADs on the QP. */\r
- special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
-\r
- /* No longer in use by the asynchronous processing thread. */\r
- cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP send completion callback.\r
- */\r
-void\r
-spl_qp_send_comp_cb(\r
- IN const ib_cq_handle_t h_cq,\r
- IN void* cq_context )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( cq_context );\r
- p_spl_qp_svc = cq_context;\r
-\r
-#if defined( CL_USE_MUTEX )\r
-\r
- /* Queue an asynchronous processing item to process sends. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( !p_spl_qp_svc->send_async_queued )\r
- {\r
- p_spl_qp_svc->send_async_queued = TRUE;\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
- cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb );\r
- }\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
-#else\r
-\r
- /* Invoke the callback directly. */\r
- CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq );\r
- spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND );\r
-\r
- /* Continue processing any queued MADs on the QP. */\r
- special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
-\r
-#endif\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-#if defined( CL_USE_MUTEX )\r
-void\r
-spl_qp_send_async_cb(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_item );\r
- p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb );\r
-\r
- /* Reset asynchronous queue flag. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- p_spl_qp_svc->send_async_queued = FALSE;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND );\r
-\r
- /* Continue processing any queued MADs on the QP. */\r
- status = special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- deref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-#endif\r
-\r
-\r
-\r
-/*\r
- * Special QP receive completion callback.\r
- */\r
-void\r
-spl_qp_recv_comp_cb(\r
- IN const ib_cq_handle_t h_cq,\r
- IN void* cq_context )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( cq_context );\r
- p_spl_qp_svc = cq_context;\r
-\r
-#if defined( CL_USE_MUTEX )\r
-\r
- /* Queue an asynchronous processing item to process receives. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( !p_spl_qp_svc->recv_async_queued )\r
- {\r
- p_spl_qp_svc->recv_async_queued = TRUE;\r
- ref_al_obj( &p_spl_qp_svc->obj );\r
- cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb );\r
- }\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
-#else\r
-\r
- CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq );\r
- spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV );\r
-\r
-#endif\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-#if defined( CL_USE_MUTEX )\r
-void\r
-spl_qp_recv_async_cb(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_item );\r
- p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb );\r
-\r
- /* Reset asynchronous queue flag. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- p_spl_qp_svc->recv_async_queued = FALSE;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV );\r
-\r
- deref_al_obj( &p_spl_qp_svc->obj );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-#endif\r
-\r
-\r
-\r
-/*\r
- * Special QP completion handler.\r
- */\r
-void\r
-spl_qp_comp(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN const ib_cq_handle_t h_cq,\r
- IN ib_wc_type_t wc_type )\r
-{\r
- ib_wc_t wc;\r
- ib_wc_t* p_free_wc = &wc;\r
- ib_wc_t* p_done_wc;\r
- al_mad_wr_t* p_mad_wr;\r
- al_mad_element_t* p_al_mad;\r
- ib_mad_element_t* p_mad_element;\r
- ib_smp_t* p_smp;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI_CB );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( h_cq );\r
-\r
- /* Check the QP state and guard against error handling. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
- {\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- return;\r
- }\r
- cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- wc.p_next = NULL;\r
- /* Process work completions. */\r
- while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )\r
- {\r
- /* Process completions one at a time. */\r
- CL_ASSERT( p_done_wc );\r
-\r
- /* Flushed completions are handled elsewhere. */\r
- if( wc.status == IB_WCS_WR_FLUSHED_ERR )\r
- {\r
- p_free_wc = &wc;\r
- continue;\r
- }\r
-\r
- /*\r
- * Process the work completion. Per IBA specification, the\r
- * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS.\r
- * Use the wc_type parameter.\r
- */\r
- switch( wc_type )\r
- {\r
- case IB_WC_SEND:\r
- /* Get a pointer to the MAD work request. */\r
- p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id);\r
-\r
- /* Remove the MAD work request from the service tracking queue. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- cl_qlist_remove_item( &p_spl_qp_svc->send_queue,\r
- &p_mad_wr->list_item );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Reset directed route SMPs as required by IBA. */\r
- p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr );\r
- if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR )\r
- {\r
- if( ib_smp_is_response( p_smp ) )\r
- p_smp->hop_ptr++;\r
- else\r
- p_smp->hop_ptr--;\r
- }\r
-\r
- /* Report the send completion to the dispatcher. */\r
- mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc );\r
- break;\r
-\r
- case IB_WC_RECV:\r
-\r
- /* Initialize pointers to the MAD element. */\r
- p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);\r
- p_mad_element = &p_al_mad->element;\r
-\r
- /* Remove the AL MAD element from the service tracking list. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
-\r
- cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
- &p_al_mad->list_item );\r
-\r
- /* Replenish the receive buffer. */\r
- spl_qp_svc_post_recvs( p_spl_qp_svc );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Construct the MAD element from the receive work completion. */\r
- build_mad_recv( p_mad_element, &wc );\r
-\r
- /* Process the received MAD. */\r
- status = process_mad_recv( p_spl_qp_svc, p_mad_element );\r
-\r
- /* Discard this MAD on error. */\r
- if( status != IB_SUCCESS )\r
- {\r
- status = ib_put_mad( p_mad_element );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
- break;\r
-\r
- default:\r
- CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV );\r
- break;\r
- }\r
-\r
- if( wc.status != IB_WCS_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("special QP completion error: %s! internal syndrome 0x%I64x\n",\r
- ib_get_wc_status_str( wc.status ), wc.vendor_specific) );\r
-\r
- /* Reset the special QP service and return. */\r
- spl_qp_svc_reset( p_spl_qp_svc );\r
- }\r
- p_free_wc = &wc;\r
- }\r
-\r
- /* Rearm the CQ. */\r
- status = ib_rearm_cq( h_cq, FALSE );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
- AL_EXIT( AL_DBG_SMI_CB );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Process a received MAD.\r
- */\r
-ib_api_status_t\r
-process_mad_recv(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- ib_smp_t* p_smp;\r
- mad_route_t route;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_element );\r
-\r
- /*\r
- * If the CA has a HW agent then this MAD should have been\r
- * consumed below verbs. The fact that it was received here\r
- * indicates that it should be forwarded to the dispatcher\r
- * for delivery to a class manager. Otherwise, determine how\r
- * the MAD should be routed.\r
- */\r
- route = ROUTE_DISPATCHER;\r
- if( check_local_mad( p_spl_qp_svc->h_qp ) )\r
- {\r
- /*\r
- * SMP and GMP processing is branched here to handle overlaps\r
- * between class methods and attributes.\r
- */\r
- switch( p_mad_element->p_mad_buf->mgmt_class )\r
- {\r
- case IB_MCLASS_SUBN_DIR:\r
- /* Perform special checks on directed route SMPs. */\r
- p_smp = (ib_smp_t*)p_mad_element->p_mad_buf;\r
-\r
- if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) ||\r
- ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) )\r
- {\r
- route = ROUTE_DISCARD;\r
- }\r
- else if( ib_smp_is_response( p_smp ) )\r
- {\r
- /*\r
- * This node is the destination of the response. Discard\r
- * the source LID or hop pointer are incorrect.\r
- */\r
- if( p_smp->dr_slid == IB_LID_PERMISSIVE )\r
- {\r
- if( p_smp->hop_ptr == 1 )\r
- {\r
- p_smp->hop_ptr--; /* Adjust ptr per IBA spec. */\r
- }\r
- else\r
- {\r
- route = ROUTE_DISCARD;\r
- }\r
- }\r
- else if( ( p_smp->dr_slid < p_spl_qp_svc->base_lid ) ||\r
- ( p_smp->dr_slid >= p_spl_qp_svc->base_lid +\r
- ( 1 << p_spl_qp_svc->lmc ) ) )\r
- {\r
- route = ROUTE_DISCARD;\r
- }\r
- }\r
- else\r
- {\r
- /*\r
- * This node is the destination of the request. Discard\r
- * the destination LID or hop pointer are incorrect.\r
- */\r
- if( p_smp->dr_dlid == IB_LID_PERMISSIVE )\r
- {\r
- if( p_smp->hop_count == p_smp->hop_ptr )\r
- {\r
- p_smp->return_path[ p_smp->hop_ptr++ ] =\r
- p_spl_qp_svc->port_num; /* Set path per IBA spec. */\r
- }\r
- else\r
- {\r
- route = ROUTE_DISCARD;\r
- }\r
- }\r
- else if( ( p_smp->dr_dlid < p_spl_qp_svc->base_lid ) ||\r
- ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid +\r
- ( 1 << p_spl_qp_svc->lmc ) ) )\r
- {\r
- route = ROUTE_DISCARD;\r
- }\r
- }\r
-\r
- if( route == ROUTE_DISCARD ) break;\r
- /* else fall through next case */\r
-\r
- case IB_MCLASS_SUBN_LID:\r
- route = route_recv_smp( p_mad_element );\r
- break;\r
-\r
- case IB_MCLASS_PERF:\r
- /* Process the received GMP. */\r
- switch( p_mad_element->p_mad_buf->method )\r
- {\r
- case IB_MAD_METHOD_GET:\r
- case IB_MAD_METHOD_SET:\r
- route = ROUTE_LOCAL;\r
- break;\r
- default:\r
- break;\r
- }\r
- break;\r
-\r
- case IB_MCLASS_BM:\r
- route = route_recv_gmp( p_mad_element );\r
- break;\r
-\r
- case IB_MCLASS_SUBN_ADM:\r
- case IB_MCLASS_DEV_MGMT:\r
- case IB_MCLASS_COMM_MGMT:\r
- case IB_MCLASS_SNMP:\r
- break;\r
-\r
- default:\r
- /* Route vendor specific MADs to the HCA provider. */\r
- if( ib_class_is_vendor_specific(\r
- p_mad_element->p_mad_buf->mgmt_class ) )\r
- {\r
- route = route_recv_gmp( p_mad_element );\r
- }\r
- break;\r
- }\r
- }\r
-\r
- /* Route the MAD. */\r
- if( is_discard( route ) )\r
- status = IB_ERROR;\r
- else if( is_dispatcher( route ) )\r
- status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element );\r
- else if( is_remote( route ) )\r
- status = forward_sm_trap( p_spl_qp_svc, p_mad_element );\r
- else\r
- status = recv_local_mad( p_spl_qp_svc, p_mad_element );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Route a received SMP.\r
- */\r
-mad_route_t\r
-route_recv_smp(\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- mad_route_t route;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Process the received SMP. */\r
- switch( p_mad_element->p_mad_buf->method )\r
- {\r
- case IB_MAD_METHOD_GET:\r
- case IB_MAD_METHOD_SET:\r
- route = route_recv_smp_attr( p_mad_element );\r
- break;\r
-\r
- case IB_MAD_METHOD_TRAP:\r
- /*\r
- * Special check to route locally generated traps to the remote SM.\r
- * Distinguished from other receives by the p_wc->recv.ud.recv_opt\r
- * IB_RECV_OPT_FORWARD flag.\r
- *\r
- * Note that because forwarded traps use AL MAD services, the upper\r
- * 32-bits of the TID are reserved by the access layer. When matching\r
- * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
- * TID.\r
- */\r
- AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("Trap TID = 0x%08x:%08x \n",\r
- ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
- ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
-\r
- route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ?\r
- ROUTE_REMOTE : ROUTE_DISPATCHER;\r
- break;\r
-\r
- case IB_MAD_METHOD_TRAP_REPRESS:\r
- /*\r
- * Note that because forwarded traps use AL MAD services, the upper\r
- * 32-bits of the TID are reserved by the access layer. When matching\r
- * a Trap Repress MAD, the SMA must only use the lower 32-bits of the\r
- * TID.\r
- */\r
- AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("TrapRepress TID = 0x%08x:%08x \n",\r
- ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0],\r
- ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1]));\r
-\r
- route = ROUTE_LOCAL;\r
- break;\r
-\r
- default:\r
- route = ROUTE_DISPATCHER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return route;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Route received SMP attributes.\r
- */\r
-mad_route_t\r
-route_recv_smp_attr(\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- mad_route_t route;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Process the received SMP attributes. */\r
- switch( p_mad_element->p_mad_buf->attr_id )\r
- {\r
- case IB_MAD_ATTR_NODE_DESC:\r
- case IB_MAD_ATTR_NODE_INFO:\r
- case IB_MAD_ATTR_GUID_INFO:\r
- case IB_MAD_ATTR_PORT_INFO:\r
- case IB_MAD_ATTR_P_KEY_TABLE:\r
- case IB_MAD_ATTR_SLVL_TABLE:\r
- case IB_MAD_ATTR_VL_ARBITRATION:\r
- case IB_MAD_ATTR_VENDOR_DIAG:\r
- case IB_MAD_ATTR_LED_INFO:\r
- case IB_MAD_ATTR_SWITCH_INFO:\r
- route = ROUTE_LOCAL;\r
- break;\r
-\r
- default:\r
- route = ROUTE_DISPATCHER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return route;\r
-}\r
-\r
-\r
-/*\r
- * Route a received GMP.\r
- */\r
-mad_route_t\r
-route_recv_gmp(\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- mad_route_t route;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Process the received GMP. */\r
- switch( p_mad_element->p_mad_buf->method )\r
- {\r
- case IB_MAD_METHOD_GET:\r
- case IB_MAD_METHOD_SET:\r
- /* Route vendor specific MADs to the HCA provider. */\r
- if( ib_class_is_vendor_specific(\r
- p_mad_element->p_mad_buf->mgmt_class ) )\r
- {\r
- route = ROUTE_LOCAL;\r
- }\r
- else\r
- {\r
- route = route_recv_gmp_attr( p_mad_element );\r
- }\r
- break;\r
-\r
- default:\r
- route = ROUTE_DISPATCHER;\r
- break;\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return route;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Route received GMP attributes.\r
- */\r
-mad_route_t\r
-route_recv_gmp_attr(\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- mad_route_t route;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Process the received GMP attributes. */\r
- if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO )\r
- route = ROUTE_LOCAL;\r
- else\r
- route = ROUTE_DISPATCHER;\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return route;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Forward a locally generated Subnet Management trap.\r
- */\r
-ib_api_status_t\r
-forward_sm_trap(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- ib_av_attr_t av_attr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_element );\r
-\r
- /* Check the SMP class. */\r
- if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID )\r
- {\r
- /*\r
- * Per IBA Specification Release 1.1 Section 14.2.2.1,\r
- * "C14-5: Only a SM shall originate a directed route SMP."\r
- * Therefore all traps should be LID routed; drop this one.\r
- */\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_ERROR;\r
- }\r
-\r
- if(p_spl_qp_svc->sm_lid == p_spl_qp_svc->base_lid)\r
- return mad_disp_recv_done(p_spl_qp_svc->h_mad_disp,p_mad_element);\r
- \r
- /* Create an address vector for the SM. */\r
- cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
- av_attr.port_num = p_spl_qp_svc->port_num;\r
- av_attr.sl = p_spl_qp_svc->sm_sl;\r
- av_attr.dlid = p_spl_qp_svc->sm_lid;\r
- av_attr.grh_valid = FALSE;\r
-\r
- status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
- &av_attr, &p_mad_element->h_av );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
- }\r
-\r
- /* Complete the initialization of the MAD element. */\r
- p_mad_element->p_next = NULL;\r
- p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY;\r
- p_mad_element->resp_expected = FALSE;\r
-\r
- /* Clear context1 for proper send completion callback processing. */\r
- p_mad_element->context1 = NULL;\r
-\r
- /*\r
- * Forward the trap. Note that because forwarded traps use AL MAD\r
- * services, the upper 32-bits of the TID are reserved by the access\r
- * layer. When matching a Trap Repress MAD, the SMA must only use\r
- * the lower 32-bits of the TID.\r
- */\r
- status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL );\r
-\r
- if( status != IB_SUCCESS )\r
- ib_destroy_av( p_mad_element->h_av );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-/*\r
- * Process a locally routed MAD received from the special QP.\r
- */\r
-ib_api_status_t\r
-recv_local_mad(\r
- IN spl_qp_svc_t* p_spl_qp_svc,\r
- IN ib_mad_element_t* p_mad_request )\r
-{\r
- ib_mad_t* p_mad_hdr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_spl_qp_svc );\r
- CL_ASSERT( p_mad_request );\r
-\r
- /* Initialize the MAD element. */\r
- p_mad_hdr = ib_get_mad_buf( p_mad_request );\r
- p_mad_request->context1 = p_mad_request;\r
-\r
- /* Save the TID. */\r
- p_mad_request->context2 =\r
- (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id );\r
-/*\r
- * Disable warning about passing unaligned 64-bit value.\r
- * The value is always aligned given how buffers are allocated\r
- * and given the layout of a MAD.\r
- */\r
-#pragma warning( push, 3 )\r
- al_set_al_tid( &p_mad_hdr->trans_id, 0 );\r
-#pragma warning( pop )\r
-\r
- /*\r
- * We need to get a response from the local HCA to this MAD only if this\r
- * MAD is not itself a response.\r
- */\r
- p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) ||\r
- ( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) );\r
- p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT;\r
- p_mad_request->send_opt = IB_SEND_OPT_LOCAL;\r
-\r
- /* Send the locally addressed MAD request to the CA for processing. */\r
- status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP alias send completion callback.\r
- */\r
-void\r
-spl_qp_alias_send_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void* mad_svc_context,\r
- IN ib_mad_element_t* p_mad_element )\r
-{\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- UNUSED_PARAM( h_mad_svc );\r
- UNUSED_PARAM( mad_svc_context );\r
- CL_ASSERT( p_mad_element );\r
-\r
- if( p_mad_element->h_av )\r
- {\r
- status = ib_destroy_av( p_mad_element->h_av );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
-\r
- status = ib_put_mad( p_mad_element );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP alias receive completion callback.\r
- */\r
-void\r
-spl_qp_alias_recv_cb(\r
- IN ib_mad_svc_handle_t h_mad_svc,\r
- IN void* mad_svc_context,\r
- IN ib_mad_element_t* p_mad_response )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- ib_mad_element_t* p_mad_request;\r
- ib_mad_t* p_mad_hdr;\r
- ib_av_attr_t av_attr;\r
- ib_api_status_t status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( mad_svc_context );\r
- CL_ASSERT( p_mad_response );\r
- CL_ASSERT( p_mad_response->send_context1 );\r
-\r
- /* Initialize pointers. */\r
- p_spl_qp_svc = mad_svc_context;\r
- p_mad_request = p_mad_response->send_context1;\r
- p_mad_hdr = ib_get_mad_buf( p_mad_response );\r
-\r
- /* Restore the TID, so it will match on the remote side. */\r
-#pragma warning( push, 3 )\r
- al_set_al_tid( &p_mad_hdr->trans_id,\r
- (uint32_t)(uintn_t)p_mad_response->send_context2 );\r
-#pragma warning( pop )\r
-\r
- /* Set the remote QP. */\r
- p_mad_response->remote_qp = p_mad_request->remote_qp;\r
- p_mad_response->remote_qkey = p_mad_request->remote_qkey;\r
-\r
- /* Prepare to create an address vector. */\r
- cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );\r
- av_attr.port_num = p_spl_qp_svc->port_num;\r
- av_attr.sl = p_mad_request->remote_sl;\r
- av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;\r
- av_attr.path_bits = p_mad_request->path_bits;\r
- if( p_mad_request->grh_valid )\r
- {\r
- cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) );\r
- av_attr.grh.src_gid = p_mad_request->p_grh->dest_gid;\r
- av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid;\r
- av_attr.grh_valid = TRUE;\r
- }\r
- if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) &&\r
- ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) )\r
- av_attr.dlid = IB_LID_PERMISSIVE;\r
- else\r
- av_attr.dlid = p_mad_request->remote_lid;\r
-\r
- /* Create an address vector. */\r
- status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias,\r
- &av_attr, &p_mad_response->h_av );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_put_mad( p_mad_response );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return;\r
- }\r
-\r
- /* Send the response. */\r
- status = ib_send_mad( h_mad_svc, p_mad_response, NULL );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- ib_destroy_av( p_mad_response->h_av );\r
- ib_put_mad( p_mad_response );\r
- }\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Post receive buffers to a special QP.\r
- */\r
-static ib_api_status_t\r
-spl_qp_svc_post_recvs(\r
- IN spl_qp_svc_t* const p_spl_qp_svc )\r
-{\r
- ib_mad_element_t* p_mad_element;\r
- al_mad_element_t* p_al_element;\r
- ib_recv_wr_t recv_wr;\r
- ib_api_status_t status = IB_SUCCESS;\r
-\r
- /* Attempt to post receive buffers up to the max_qp_depth limit. */\r
- while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) <\r
- (int32_t)p_spl_qp_svc->max_qp_depth )\r
- {\r
- /* Get a MAD element from the pool. */\r
- status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key,\r
- MAD_BLOCK_SIZE, &p_mad_element );\r
-\r
- if( status != IB_SUCCESS ) break;\r
-\r
- p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t,\r
- element );\r
-\r
- /* Build the receive work request. */\r
- recv_wr.p_next = NULL;\r
- recv_wr.wr_id = (uintn_t)p_al_element;\r
- recv_wr.num_ds = 1;\r
- recv_wr.ds_array = &p_al_element->grh_ds;\r
-\r
- /* Queue the receive on the service tracking list. */\r
- cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue,\r
- &p_al_element->list_item );\r
-\r
- /* Post the receive. */\r
- status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL );\r
-\r
- if( status != IB_SUCCESS )\r
- {\r
- AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
- ("Failed to post receive %016I64x\n",\r
- (LONG_PTR)p_al_element) );\r
- cl_qlist_remove_item( &p_spl_qp_svc->recv_queue,\r
- &p_al_element->list_item );\r
-\r
- ib_put_mad( p_mad_element );\r
- break;\r
- }\r
- }\r
-\r
- return status;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP service asynchronous event callback.\r
- */\r
-void\r
-spl_qp_svc_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_event_rec );\r
- CL_ASSERT( p_event_rec->context );\r
-\r
- if( p_event_rec->code == IB_AE_SQ_DRAINED )\r
- {\r
- AL_EXIT( AL_DBG_SMI );\r
- return;\r
- }\r
-\r
- p_spl_qp_svc = p_event_rec->context;\r
-\r
- spl_qp_svc_reset( p_spl_qp_svc );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP service reset.\r
- */\r
-void\r
-spl_qp_svc_reset(\r
- IN spl_qp_svc_t* p_spl_qp_svc )\r
-{\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
-\r
- if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
- {\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- return;\r
- }\r
-\r
- /* Change the special QP service to the error state. */\r
- p_spl_qp_svc->state = SPL_QP_ERROR;\r
-\r
- /* Flag the service as in use by the asynchronous processing thread. */\r
- cl_atomic_inc( &p_spl_qp_svc->in_use_cnt );\r
-\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Queue an asynchronous processing item to reset the special QP. */\r
- cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Asynchronous processing thread callback to reset the special QP service.\r
- */\r
-void\r
-spl_qp_svc_reset_cb(\r
- IN cl_async_proc_item_t* p_item )\r
-{\r
- spl_qp_svc_t* p_spl_qp_svc;\r
- cl_list_item_t* p_list_item;\r
- ib_wc_t wc;\r
- ib_wc_t* p_free_wc;\r
- ib_wc_t* p_done_wc;\r
- al_mad_wr_t* p_mad_wr;\r
- al_mad_element_t* p_al_mad;\r
- ib_qp_mod_t qp_mod;\r
- ib_api_status_t status;\r
- cl_qlist_t mad_wr_list;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_item );\r
- p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async );\r
-\r
- /* Wait here until the special QP service is only in use by this thread. */\r
- while( p_spl_qp_svc->in_use_cnt != 1 )\r
- {\r
- cl_thread_suspend( 0 );\r
- }\r
-\r
- /* Change the QP to the RESET state. */\r
- cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );\r
- qp_mod.req_state = IB_QPS_RESET;\r
-\r
- status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- /* Return receive MAD elements to the pool. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue );\r
- p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue );\r
- p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) )\r
- {\r
- p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item );\r
-\r
- status = ib_put_mad( &p_al_mad->element );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- }\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Re-initialize the QP. */\r
- status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- /* Poll to remove any remaining send completions from the CQ. */\r
- do\r
- {\r
- cl_memclr( &wc, sizeof( ib_wc_t ) );\r
- p_free_wc = &wc;\r
- status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc );\r
-\r
- } while( status == IB_SUCCESS );\r
-\r
- /* Post receive buffers. */\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- spl_qp_svc_post_recvs( p_spl_qp_svc );\r
-\r
- /* Re-queue any outstanding MAD send operations. */\r
- cl_qlist_init( &mad_wr_list );\r
- cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- for( p_list_item = cl_qlist_remove_head( &mad_wr_list );\r
- p_list_item != cl_qlist_end( &mad_wr_list );\r
- p_list_item = cl_qlist_remove_head( &mad_wr_list ) )\r
- {\r
- p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );\r
- special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr );\r
- }\r
-\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( p_spl_qp_svc->state == SPL_QP_ERROR )\r
- {\r
- /* The QP is ready. Change the state. */\r
- p_spl_qp_svc->state = SPL_QP_ACTIVE;\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- /* Re-arm the CQs. */\r
- status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE );\r
- CL_ASSERT( status == IB_SUCCESS );\r
- status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE );\r
- CL_ASSERT( status == IB_SUCCESS );\r
-\r
- /* Resume send processing. */\r
- special_qp_resume_sends( p_spl_qp_svc->h_qp );\r
- }\r
- else\r
- {\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- }\r
-\r
- /* No longer in use by the asynchronous processing thread. */\r
- cl_atomic_dec( &p_spl_qp_svc->in_use_cnt );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Special QP alias asynchronous event callback.\r
- */\r
-void\r
-spl_qp_alias_event_cb(\r
- IN ib_async_event_rec_t *p_event_rec )\r
-{\r
- UNUSED_PARAM( p_event_rec );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Acquire the SMI dispatcher for the given port.\r
- */\r
-ib_api_status_t\r
-acquire_smi_disp(\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t* const ph_mad_disp )\r
-{\r
- CL_ASSERT( gp_spl_qp_mgr );\r
- return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Acquire the GSI dispatcher for the given port.\r
- */\r
-ib_api_status_t\r
-acquire_gsi_disp(\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t* const ph_mad_disp )\r
-{\r
- CL_ASSERT( gp_spl_qp_mgr );\r
- return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Acquire the service dispatcher for the given port.\r
- */\r
-ib_api_status_t\r
-acquire_svc_disp(\r
- IN const cl_qmap_t* const p_svc_map,\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t *ph_mad_disp )\r
-{\r
- cl_map_item_t* p_svc_item;\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_svc_map );\r
- CL_ASSERT( gp_spl_qp_mgr );\r
-\r
- /* Search for the SMI or GSI service for the given port. */\r
- cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
- p_svc_item = cl_qmap_get( p_svc_map, port_guid );\r
- cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
- if( p_svc_item == cl_qmap_end( p_svc_map ) )\r
- {\r
- /* The port does not have an active agent. */\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_INVALID_GUID;\r
- }\r
-\r
- p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item );\r
-\r
- /* Found a match. Get MAD dispatcher handle. */\r
- *ph_mad_disp = p_spl_qp_svc->h_mad_disp;\r
-\r
- /* Reference the MAD dispatcher on behalf of the client. */\r
- ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
- return IB_SUCCESS;\r
-}\r
-\r
-\r
-\r
-/*\r
- * Force a poll for CA attribute changes.\r
- */\r
-void\r
-force_smi_poll(\r
- void )\r
-{\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- /*\r
- * Stop the poll timer. Just invoke the timer callback directly to\r
- * save the thread context switching.\r
- */\r
- smi_poll_timer_cb( gp_spl_qp_mgr );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Poll for CA port attribute changes.\r
- */\r
-void\r
-smi_poll_timer_cb(\r
- IN void* context )\r
-{\r
- cl_status_t cl_status;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( context );\r
- CL_ASSERT( gp_spl_qp_mgr == context );\r
- UNUSED_PARAM( context );\r
-\r
- /*\r
- * Scan for changes on the local HCAs. Since the PnP manager has its\r
- * own thread for processing changes, we kick off that thread in parallel\r
- * reposting receive buffers to the SQP agents.\r
- */\r
- pnp_poll();\r
-\r
- /*\r
- * To handle the case where force_smi_poll is called at the same time\r
- * the timer expires, check if the asynchronous processing item is in\r
- * use. If it is already in use, it means that we're about to poll\r
- * anyway, so just ignore this call.\r
- */\r
- cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock );\r
-\r
- /* Perform port processing on the special QP agents. */\r
- cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs,\r
- gp_spl_qp_mgr );\r
-\r
- /* Determine if there are any special QP agents to poll. */\r
- if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval )\r
- {\r
- /* Restart the polling timer. */\r
- cl_status =\r
- cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- }\r
- cl_spinlock_release( &gp_spl_qp_mgr->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
-\r
-\r
-\r
-/*\r
- * Post receive buffers to a special QP.\r
- */\r
-void\r
-smi_post_recvs(\r
- IN cl_list_item_t* const p_list_item,\r
- IN void* context )\r
-{\r
- al_obj_t* p_obj;\r
- spl_qp_svc_t* p_spl_qp_svc;\r
-\r
- AL_ENTER( AL_DBG_SMI );\r
-\r
- CL_ASSERT( p_list_item );\r
- UNUSED_PARAM( context );\r
-\r
- p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );\r
- p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj );\r
-\r
- cl_spinlock_acquire( &p_spl_qp_svc->obj.lock );\r
- if( p_spl_qp_svc->state != SPL_QP_ACTIVE )\r
- {\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
- return;\r
- }\r
-\r
- spl_qp_svc_post_recvs( p_spl_qp_svc );\r
- cl_spinlock_release( &p_spl_qp_svc->obj.lock );\r
-\r
- AL_EXIT( AL_DBG_SMI );\r
-}\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: al_smi.h 744 2007-07-31 19:04:15Z leonidk $\r
- */\r
-\r
-\r
-#if !defined( __AL_SMI_H__ )\r
-#define __AL_SMI_H__\r
-\r
-\r
-#include <iba/ib_types.h>\r
-#include <complib/cl_qmap.h>\r
-#include "al_common.h"\r
-#include "al_mad.h"\r
-\r
-\r
-/* Global special QP manager */\r
-typedef struct _spl_qp_mgr\r
-{\r
- al_obj_t obj; /* Child of gp_al_mgr */\r
- ib_pnp_handle_t h_qp0_pnp; /* Handle for QP0 port PnP events */\r
- ib_pnp_handle_t h_qp1_pnp; /* Handle for QP1 port PnP events */\r
-\r
- cl_timer_t poll_timer; /* Timer for polling HW SMIs */\r
-\r
- cl_qmap_t smi_map; /* List of SMI services */\r
- cl_qmap_t gsi_map; /* List of GSI services */\r
-\r
-} spl_qp_mgr_t;\r
-\r
-\r
-\r
-typedef enum _spl_qp_svc_state\r
-{\r
- SPL_QP_INIT = 0,\r
- SPL_QP_ACTIVE,\r
- SPL_QP_ERROR,\r
- SPL_QP_DESTROYING\r
-\r
-} spl_qp_svc_state_t;\r
-\r
-/*\r
- * Attribute cache for port info saved to expedite local MAD processing.\r
- * Note that the cache accounts for the worst case GID and PKEY table size\r
- * but is allocated from paged pool, so it's nothing to worry about.\r
- */\r
-\r
-typedef struct _guid_block\r
-{\r
- boolean_t valid;\r
- ib_guid_info_t tbl;\r
-\r
-} guid_block_t;\r
-\r
-\r
-typedef struct _pkey_block\r
-{\r
- boolean_t valid;\r
- ib_pkey_table_t tbl;\r
-\r
-} pkey_block_t;\r
-\r
-typedef struct _sl_vl_cache\r
-{\r
- boolean_t valid;\r
- ib_slvl_table_t tbl;\r
-\r
-} sl_vl_cache_t;\r
-\r
-typedef struct _vl_arb_block\r
-{\r
- boolean_t valid;\r
- ib_vl_arb_table_t tbl;\r
-\r
-} vl_arb_block_t;\r
-\r
-typedef struct _attr_cache\r
-{\r
- guid_block_t guid_block[32];\r
- pkey_block_t pkey_tbl[2048];\r
- sl_vl_cache_t sl_vl;\r
- vl_arb_block_t vl_arb[4];\r
-\r
-} spl_qp_cache_t;\r
-\r
-\r
-/* Per port special QP service */\r
-typedef struct _spl_qp_svc\r
-{\r
- al_obj_t obj; /* Child of spl_qp_agent_t */\r
- cl_map_item_t map_item; /* Item on SMI/GSI list */\r
-\r
- net64_t port_guid;\r
- uint8_t port_num;\r
- ib_net16_t base_lid;\r
- uint8_t lmc;\r
- \r
- ib_net16_t sm_lid;\r
- uint8_t sm_sl;\r
- ib_net64_t m_key;\r
-\r
- spl_qp_cache_t cache;\r
- cl_spinlock_t cache_lock;\r
- boolean_t cache_en;\r
- \r
- al_mad_disp_handle_t h_mad_disp;\r
- ib_cq_handle_t h_send_cq;\r
- ib_cq_handle_t h_recv_cq;\r
- ib_qp_handle_t h_qp;\r
-\r
-#if defined( CL_USE_MUTEX )\r
- boolean_t send_async_queued;\r
- cl_async_proc_item_t send_async_cb;\r
- boolean_t recv_async_queued;\r
- cl_async_proc_item_t recv_async_cb;\r
-#endif\r
-\r
- spl_qp_svc_state_t state;\r
- atomic32_t in_use_cnt;\r
- cl_async_proc_item_t reset_async;\r
-\r
- uint32_t max_qp_depth;\r
- al_mad_wr_t* local_mad_wr;\r
- cl_qlist_t send_queue;\r
- cl_qlist_t recv_queue;\r
- cl_async_proc_item_t send_async;\r
-\r
- ib_qp_handle_t h_qp_alias;\r
- ib_pool_key_t pool_key;\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
-} spl_qp_svc_t;\r
-\r
-\r
-typedef enum _mad_route\r
-{\r
- ROUTE_DISPATCHER = 0,\r
- ROUTE_REMOTE,\r
- ROUTE_LOCAL,\r
- ROUTE_LOOPBACK,\r
- ROUTE_DISCARD\r
-\r
-} mad_route_t;\r
-\r
-\r
-static inline boolean_t\r
-is_dispatcher(\r
- IN const mad_route_t route )\r
-{\r
- return( route == ROUTE_DISPATCHER );\r
-}\r
-\r
-\r
-static inline boolean_t\r
-is_remote(\r
- IN const mad_route_t route )\r
-{\r
- return( route == ROUTE_REMOTE );\r
-}\r
-\r
-\r
-static inline boolean_t\r
-is_discard(\r
- IN const mad_route_t route )\r
-{\r
- return( route == ROUTE_DISCARD );\r
-}\r
-\r
-\r
-static inline boolean_t\r
-is_loopback(\r
- IN const mad_route_t route )\r
-{\r
- return( route == ROUTE_LOOPBACK );\r
-}\r
-\r
-\r
-static inline boolean_t\r
-is_local(\r
- IN const mad_route_t route )\r
-{\r
- /*\r
- * Loopback implies a locally routed MAD. Discarded MADs are always\r
- * handled locally to maintain proper order of work completions.\r
- */\r
- return( ( route == ROUTE_LOCAL ) ||\r
- is_loopback( route ) || is_discard( route ) );\r
-}\r
-\r
-\r
-ib_api_status_t\r
-create_spl_qp_mgr(\r
- IN al_obj_t* const p_parent_obj );\r
-\r
-\r
-ib_api_status_t\r
-acquire_smi_disp(\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t* const ph_mad_disp );\r
-\r
-\r
-ib_api_status_t\r
-acquire_gsi_disp(\r
- IN const ib_net64_t port_guid,\r
- OUT al_mad_disp_handle_t* const ph_mad_disp );\r
-\r
-\r
-ib_api_status_t\r
-spl_qp_svc_send(\r
- IN const ib_qp_handle_t h_qp,\r
- IN ib_send_wr_t* const p_send_wr );\r
-\r
-\r
-void\r
-force_smi_poll(\r
- void );\r
-\r
-\r
-#endif\r
+++ /dev/null
-[CatalogHeader]\r
-Name=ibal.cat\r
-PublicVersion=0x0000001\r
-EncodingType=0x00010001\r
-CATATTR1=0x10010001:OSAttr:2:6.0\r
-[CatalogFiles]\r
-<hash>ibal.inf=ibal.inf\r
-<hash>ibal.sys=ibal.sys\r
-<hash>ibal.dll=ibal.dll\r
-<hash>ibald.dll=ibald.dll\r
-<hash>complib.dll=complib.dll\r
-<hash>complibd.dll=complibd.dll\r
-<hash>cl32d.dll=cl32d.dll\r
-<hash>cl32.dll=cl32.dll\r
-<hash>ibal32d.dll=ibal32d.dll\r
-<hash>ibal32.dll=ibal32.dll\r
-\r
+++ /dev/null
-; OpenIB InfiniBand Access Layer (Upper Filter) Driver.\r
-; Copyright 2008 Intel Corporation all Rights Reserved.\r
-\r
-[Version]\r
-Signature="$Windows NT$"\r
-Class=InfiniBandHca\r
-; GUID matches mthca class giud\r
-ClassGuid={58517E00-D3CF-40c9-A679-CEE5752F4491}\r
-Provider=%OFA%\r
-DriverVer=05/07/2008,2.0.0000.1135\r
-CatalogFile=ibal.cat\r
-\r
-; ================= Device Install section =====================\r
-\r
-; 64-bit platforms also copy 32-bit user-mode binaries.\r
-\r
-[DestinationDirs]\r
-DefaultDestDir=%DIRID_DRIVERS%\r
-Ibal.UMCopyFiles=%DIRID_SYSTEM%\r
-Ibal.WOW64CopyFiles=%DIRID_SYSTEM_X86%\r
-\r
-[SourceDisksNames.x86]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.amd64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksNames.ia64]\r
-1=%DiskId%,,,""\r
-\r
-[SourceDisksFiles.x86]\r
-ibal.sys=1\r
-ibal.dll=1\r
-complib.dll=1\r
-ibald.dll=1\r
-complibd.dll=1\r
-\r
-[SourceDisksFiles.amd64]\r
-ibal.sys=1\r
-ibal.dll=1\r
-ibald.dll=1\r
-complib.dll=1\r
-complibd.dll=1\r
-cl32.dll=1\r
-cl32d.dll=1\r
-ibal32.dll=1\r
-ibal32d.dll=1\r
-\r
-[SourceDisksFiles.ia64]\r
-ibal.sys=1\r
-ibal.dll=1\r
-ibald.dll=1\r
-complib.dll=1\r
-complibd.dll=1\r
-cl32.dll=1\r
-cl32d.dll=1\r
-ibal32.dll=1\r
-ibal32d.dll=1\r
-\r
-[Manufacturer]\r
-%OFA% = Ibal.DeviceSection,ntx86,ntamd64,ntia64\r
-\r
-[Ibal.DeviceSection]\r
-; empty since we don't support W9x/Me\r
-\r
-[Ibal.DeviceSection.ntx86]\r
-%Ibal.DeviceDesc% = Ibal.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8}\r
-\r
-[Ibal.DeviceSection.ntamd64]\r
-%Ibal.DeviceDesc% = Ibal.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8}\r
-\r
-[Ibal.DeviceSection.ntia64]\r
-%Ibal.DeviceDesc% = Ibal.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8}\r
-\r
-\r
-[Ibal.DDInstall.ntx86]\r
-CopyFiles = Ibal.CopyFiles\r
-CopyFiles = Ibal.UMCopyFiles\r
-\r
-[Ibal.DDInstall.ntamd64]\r
-CopyFiles = Ibal.CopyFiles\r
-CopyFiles = Ibal.UMCopyFiles\r
-CopyFiles = Ibal.WOW64CopyFiles\r
-\r
-[Ibal.DDInstall.ntia64]\r
-CopyFiles = Ibal.CopyFiles\r
-CopyFiles = Ibal.UMCopyFiles\r
-CopyFiles = Ibal.WOW64CopyFiles\r
-\r
-[Ibal.DDInstall.ntx86.Services]\r
-AddService = ibal,%SPSVCINST_ASSOCSERVICE%,Ibal.ServiceInstall\r
-\r
-[Ibal.DDInstall.ntamd64.Services]\r
-AddService = ibal,%SPSVCINST_ASSOCSERVICE%,Ibal.ServiceInstall\r
-\r
-[Ibal.DDInstall.ntia64.Services]\r
-AddService = ibal,%SPSVCINST_ASSOCSERVICE%,Ibal.ServiceInstall\r
-\r
-[Ibal.CopyFiles]\r
-ibal.sys\r
-\r
-[Ibal.UMCopyFiles]\r
-ibal.dll,,,2\r
-ibald.dll,,,2\r
-complib.dll,,,2\r
-complibd.dll,,,2\r
-\r
-[Ibal.WOW64CopyFiles]\r
-ibal.dll,ibal32.dll,,2\r
-ibald.dll,ibal32d.dll,,2\r
-complib.dll,cl32.dll,,2\r
-complibd.dll,cl32d.dll,,2\r
-\r
-;\r
-; ============= Service Install section ==============\r
-;\r
-\r
-[Ibal.ServiceInstall]\r
-DisplayName = %Ibal.ServiceDesc%\r
-ServiceType = %SERVICE_KERNEL_DRIVER%\r
-StartType = %SERVICE_DEMAND_START%\r
-ErrorControl = %SERVICE_ERROR_NORMAL%\r
-ServiceBinary = %12%\ibal.sys\r
-LoadOrderGroup = PnP Filter\r
-AddReg = Ibal.ParamsReg\r
-Dependencies = mthca\r
-\r
-[Ibal.ParamsReg]\r
-HKR,"Parameters","IbalDebugLevel",%REG_DWORD%,2\r
-HKR,"Parameters","IbalDebugFlags",%REG_DWORD%,0x00ffffff\r
-HKR,"Parameters","SmiPollInterval",%REG_DWORD_NO_CLOBBER%,20000\r
-HKR,"Parameters","IocQueryTimeout",%REG_DWORD_NO_CLOBBER%,250\r
-HKR,"Parameters","IocQueryRetries",%REG_DWORD_NO_CLOBBER%,4\r
-HKR,"Parameters","IocPollInterval",%REG_DWORD_NO_CLOBBER%,30000\r
-HKR,,"UpperFilters",0x00010000,"IBAL"\r
-\r
-[Strings]\r
-OFA = "OpenFabrics Alliance"\r
-ClassName = "IBAL Device"\r
-Ibal.DeviceDesc = "InfiniBand Access Layer (Filter Driver)"\r
-Ibal.ServiceDesc = "IBAL InfiniBand Access Layer Service"\r
-DiskId = "OpenFabrics InfiniBand Access Layer installation disk"\r
-SPSVCINST_NULL = 0x0\r
-SPSVCINST_ASSOCSERVICE = 0x00000002\r
-SERVICE_KERNEL_DRIVER = 1\r
-SERVICE_DEMAND_START = 3\r
-SERVICE_ERROR_NORMAL = 1\r
-REG_DWORD = 0x00010001\r
-REG_DWORD_NO_CLOBBER = 0x00010003\r
-DIRID_SYSTEM = 11\r
-DIRID_DRIVERS = 12\r
-DIRID_SYSTEM_X86 = 16425\r
+++ /dev/null
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- * Redistribution and use in source and binary forms, with or\r
- * without modification, are permitted provided that the following\r
- * conditions are met:\r
- *\r
- * - Redistributions of source code must retain the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer.\r
- *\r
- * - Redistributions in binary form must reproduce the above\r
- * copyright notice, this list of conditions and the following\r
- * disclaimer in the documentation and/or other materials\r
- * provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id: ibal.rc 474 2006-08-31 08:57:19Z sleybo $\r
- */\r
-\r
-\r
-#include <oib_ver.h>\r
-\r
-#define VER_FILETYPE VFT_DRV\r
-#define VER_FILESUBTYPE VFT2_UNKNOWN\r
-\r
-#ifdef _DEBUG_\r
-#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer (Upper Filter) Driver (Debug)"\r
-#else\r
-#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer (Upper Filter) Driver"\r
-#endif\r
-\r
-#define VER_INTERNALNAME_STR "ibal.sys"\r
-#define VER_ORIGINALFILENAME_STR "ibal.sys"\r
-\r
-#include <common.ver>\r
+++ /dev/null
-#\r
-# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source\r
-# file to this component. This file merely indirects to the real make file\r
-# that is shared by all the driver components of the OpenIB Windows project.\r
-#\r
-\r
-!INCLUDE ..\..\..\inc\openib.def\r