From: ftillier Date: Mon, 11 Jul 2005 19:51:17 +0000 (+0000) Subject: Merge fab_cm_branch into trunk X-Git-Url: https://openfabrics.org/gitweb/?a=commitdiff_plain;h=4923a3a307b11437376fdf2bc81886ba6c2c130e;p=~shefty%2Frdma-win.git Merge fab_cm_branch into trunk git-svn-id: svn://openib.tc.cornell.edu/gen1@33 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86 --- diff --git a/trunk/core/al/al.c b/trunk/core/al/al.c index d83ee7ca..af2fdbdd 100644 --- a/trunk/core/al/al.c +++ b/trunk/core/al/al.c @@ -34,7 +34,7 @@ #include "al.h" #include "al_ca.h" -#include "al_cm_shared.h" +#include "al_cm_cep.h" #include "al_common.h" #include "al_debug.h" #include "al_mad_pool.h" @@ -107,6 +107,9 @@ destroying_al( } cl_spinlock_release( &p_obj->lock ); + + /* Cleanup any left-over connections. */ + al_cep_cleanup_al( h_al ); } @@ -138,33 +141,6 @@ __free_mads( } - -static void -__free_conns( - IN const ib_al_handle_t h_al ) -{ - cl_list_item_t *p_list_item; - ib_cm_handle_t h_conn; - - /* - * Report any outstanding connections left lying around. We should - * never enter the loop below if the code is written correctly. - */ - for( p_list_item = cl_qlist_head( &h_al->conn_list ); - p_list_item != cl_qlist_end( &h_al->conn_list ); - p_list_item = cl_qlist_head( &h_al->conn_list ) ) - { - CL_ASSERT( !p_list_item ); - - h_conn = PARENT_STRUCT( p_list_item, al_conn_t, al_item ); - - /* Release the connection object, so the CM can clean-up properly. */ - cm_cleanup_conn( h_conn ); - } -} - - - void free_al( IN al_obj_t *p_obj ) @@ -177,9 +153,6 @@ free_al( /* Free any MADs not returned by the user. */ __free_mads( h_al ); - /* Cleanup any left-over connections. 
*/ - __free_conns( h_al ); - #ifdef CL_KERNEL cl_vector_destroy( &h_al->hdl_vector ); #endif @@ -190,8 +163,6 @@ free_al( } - - ib_api_status_t ib_query_ca_by_guid( IN const ib_al_handle_t h_al, @@ -229,46 +200,6 @@ ib_query_ca_by_guid( -void -al_insert_conn( - IN const ib_al_handle_t h_al, - IN const ib_cm_handle_t h_conn ) -{ - ref_al_obj( &h_al->obj ); - cl_spinlock_acquire( &h_al->obj.lock ); - - h_conn->h_al = h_al; - cl_qlist_insert_tail( &h_al->conn_list, &h_conn->al_item ); -#ifdef CL_KERNEL - h_conn->hdl = al_hdl_insert( h_al, h_conn, AL_OBJ_TYPE_H_CONN ); -#endif - - cl_spinlock_release( &h_al->obj.lock ); -} - - - -void -al_remove_conn( - IN const ib_cm_handle_t h_conn ) -{ - cl_spinlock_acquire( &h_conn->h_al->obj.lock ); - cl_qlist_remove_item( &h_conn->h_al->conn_list, &h_conn->al_item ); -#ifdef CL_KERNEL - al_hdl_free( h_conn->h_al, h_conn->hdl ); -#endif - cl_spinlock_release( &h_conn->h_al->obj.lock ); - - deref_al_obj( &h_conn->h_al->obj ); - - h_conn->h_al = NULL; -#ifdef CL_KERNEL - h_conn->hdl = AL_INVALID_HANDLE; -#endif -} - - - void al_insert_mad( IN const ib_al_handle_t h_al, diff --git a/trunk/core/al/al.h b/trunk/core/al/al.h index 71c3eaa3..df8215e2 100644 --- a/trunk/core/al/al.h +++ b/trunk/core/al/al.h @@ -81,7 +81,7 @@ typedef struct _ib_al cl_qlist_t key_list; cl_qlist_t query_list; - cl_qlist_t conn_list; + cl_qlist_t cep_list; #ifdef CL_KERNEL /* Handle manager is only needed in the kernel. 
*/ diff --git a/trunk/core/al/al_ci_ca_shared.c b/trunk/core/al/al_ci_ca_shared.c index 9da9d333..9795b5c1 100644 --- a/trunk/core/al/al_ci_ca_shared.c +++ b/trunk/core/al/al_ci_ca_shared.c @@ -37,10 +37,6 @@ #include "al_mgr.h" #include "al_pnp.h" #include "al_qp.h" - -#if defined(CL_KERNEL) -#include "al_cm.h" -#endif #include "ib_common.h" @@ -263,11 +259,6 @@ ci_ca_process_event_cb( case IB_AE_QP_COMM: case IB_AE_QP_APM: case IB_AE_QP_APM_ERROR: -#if defined(CL_KERNEL) - cm_async_event_cb( &p_event_item->event_rec ); -#endif - /* Fall through next case. */ - case IB_AE_QP_FATAL: case IB_AE_RQ_ERROR: case IB_AE_SQ_ERROR: diff --git a/trunk/core/al/al_cm_cep.h b/trunk/core/al/al_cm_cep.h new file mode 100644 index 00000000..aff72d77 --- /dev/null +++ b/trunk/core/al/al_cm_cep.h @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#pragma once + +#ifndef _AL_CM_CEP_H_ +#define _AL_CM_CEP_H_ + + +#include +#include "al_common.h" + + +#define CEP_EVENT_TIMEOUT 0x80000000 +#define CEP_EVENT_RECV 0x40000000 +#define CEP_EVENT_REQ 0x00000001 +#define CEP_EVENT_REP 0x00000002 +#define CEP_EVENT_RTU 0x00000004 +#define CEP_EVENT_DREQ 0x00000008 +#define CEP_EVENT_DREP 0x00000010 +#define CEP_EVENT_MRA 0x00000020 +#define CEP_EVENT_REJ 0x00000040 +#define CEP_EVENT_LAP 0x00000080 +#define CEP_EVENT_APR 0x00000100 +#define CEP_EVENT_SIDR 0x00800000 + + +#define AL_INVALID_CID 0xFFFFFFFF + + +typedef void +(*al_pfn_cep_cb_t)( + IN const ib_al_handle_t h_al, + IN ib_cep_t* const p_cep ); +/* PARAMETERS +* h_al +* [in] Handle to the AL instance to pass into the al_cep_poll call. +* +* p_cep +* [in] Pointer to an ib_cep_t structure containing the CID and context +* for the CEP on which the event occured. The CID should be passed +* into the al_cep_poll call. +* +* RETURN VALUES: +* This function does not return a value. +* +* NOTES +* The callback is invoked at DISPATCH_LEVEL. +* +* Recipients of the callback are expected to call al_cep_poll to retrieve +* event specific details until al_cep_poll returns IB_NOT_DONE. This may +* be done in a different thread context. +*********/ + + +ib_api_status_t +create_cep_mgr( + IN al_obj_t* const p_parent_obj ); + + +void +al_cep_cleanup_al( + IN const ib_al_handle_t h_al ); + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context, + OUT net32_t* const p_cid ); +/* +* NOTES +* This function may be invoked at DISPATCH_LEVEL +* +* The pfn_cb parameter may be NULL in the kernel if using IRPs for +* event notification. +*********/ + + +/* Destruction is asynchronous. */ +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb ); +/* +* NOTES +* Destruction is synchronous. 
+* Clients must not invoke this function from a CEP callback, but should +* instead return IB_CANCELLED or other appropriate value. +* +* The reason parameter is passed as input to KeWaitForSingleObject. +* The user-mode proxy sets this to UserRequest. Kernel clients should set +* this to Executive. +*********/ + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ); + + +ib_api_status_t +al_cep_pre_req( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_req_t* const p_cm_req, + OUT ib_qp_mod_t* const p_init ); + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void *context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ); + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rtr ); + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ); + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ); + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ); + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ); + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_lap_t* const p_cm_lap ); + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ); + + +ib_api_status_t +al_cep_send_apr( + IN 
ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata OPTIONAL, + IN const uint8_t pdata_len ); + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ); + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ); + + +ib_api_status_t +al_cep_migrate( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_established( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN OUT ib_cep_t* const p_new_cep, + OUT ib_mad_element_t** const pp_mad ); + + +#ifdef CL_KERNEL +NTSTATUS +al_cep_queue_irp( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN IRP* const p_irp ); +#endif /* CL_KERNEL */ + + +/****s* Access Layer/al_cep_sreq_t +* NAME +* al_cep_sreq_t +* +* DESCRIPTION +* Connection request information used to establish a new connection. +* +* SYNOPSIS +*/ +typedef struct _al_cep_sreq +{ + ib_net64_t svc_id; + + ib_path_rec_t* __ptr64 p_path; + + const uint8_t* __ptr64 p_pdata; + uint8_t pdata_len; + + uint8_t max_cm_retries; + ib_net16_t pkey; + uint32_t timeout_ms; + +} al_cep_sreq_t; +/* +* FIELDS +* svc_id +* The ID of the remote service to which the SIDR request is +* being made. +* +* p_path +* Path information over which to send the request. +* +* p_pdata +* Optional user-defined private data sent as part of the SIDR request. +* +* pdata_len +* Defines the size of the user-defined private data. +* +* max_cm_retries +* The maximum number of times that either CM should +* resend a SIDR message. +* +* timeout_ms +* Timeout value in milli-seconds for the SIDR REQ to expire. The CM will +* add twice packet lifetime to this value to determine the actual timeout +* value used. +* +* pkey +* pkey to be used as part of the request. 
+* +* SEE ALSO +* al_cep_sreq +*****/ + +ib_api_status_t +al_cep_sreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const al_cep_sreq_t* const p_sreq ); + + +/****s* Access Layer/al_cep_srep_t +* NAME +* al_cep_srep_t +* +* DESCRIPTION +* SIDR reply information. +* +* SYNOPSIS +*/ +typedef struct _al_cep_srep +{ + net32_t qp_num; + net32_t qkey; + + const uint8_t* __ptr64 p_pdata; + const void* __ptr64 p_info; + + uint8_t pdata_len; + uint8_t info_len; + + ib_sidr_status_t status; + +} al_cep_srep_t; +/* +* FIELDS +* qp_num +* The number of the queue pair on which the requested service +* is supported. +* +* qp_key +* The QKEY of the returned queue pair. +* +* p_pdata +* Optional user-defined private data sent as part of the SIDR reply. +* +* p_info +* Optional "additonal information" sent as part of the SIDR reply. +* +* pdata_len +* Size of the user-defined private data. +* +* info_len +* Size of the "additional information". +* +* status +* sidr status value returned back to a previously received REQ. +* +* SEE ALSO +* al_cep_srep +*****/ + +ib_api_status_t +al_cep_srep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const al_cep_srep_t* const p_sreq ); + + + + +/* + * Return the local ACK timeout value based on the given packet lifetime + * and target ACK delay. Both input values are assumed to be in the form + * 4.096 x 2 ^ input. + */ +#define MAX_LOCAL_ACK_TIMEOUT 0x1F /* limited to 5 bits */ + +inline uint8_t +calc_lcl_ack_timeout( + IN const uint8_t round_trip_time, + IN const uint8_t target_ack_delay ) +{ + uint64_t timeout; + uint8_t local_ack_timeout; + + if( !target_ack_delay ) + { + if( round_trip_time > MAX_LOCAL_ACK_TIMEOUT ) + return MAX_LOCAL_ACK_TIMEOUT; + else + return round_trip_time; + } + + /* + * Since both input and the output values are in the same form, we + * can ignore the 4.096 portion by dividing it out. + */ + + /* The input parameter is the round trip time. 
*/ + timeout = (uint64_t)1 << round_trip_time; + + /* Add in the target ack delay. */ + if( target_ack_delay ) + timeout += (uint64_t)1 << target_ack_delay; + + /* Calculate the local ACK timeout. */ + local_ack_timeout = 1; + while( (1ui64 << local_ack_timeout) <= timeout ) + { + local_ack_timeout++; + + /* Only 5-bits are valid. */ + if( local_ack_timeout > MAX_LOCAL_ACK_TIMEOUT ) + return MAX_LOCAL_ACK_TIMEOUT; + } + + return local_ack_timeout; +} + +#endif /* _AL_CM_CEP_H_ */ diff --git a/trunk/core/al/al_cm_conn.h b/trunk/core/al/al_cm_conn.h index fea30523..d11ab34f 100644 --- a/trunk/core/al/al_cm_conn.h +++ b/trunk/core/al/al_cm_conn.h @@ -134,6 +134,7 @@ typedef struct _mad_cm_req uint8_t pdata[IB_REQ_PDATA_SIZE]; } PACK_SUFFIX mad_cm_req_t; +C_ASSERT( sizeof(mad_cm_req_t) == MAD_BLOCK_SIZE ); #include @@ -362,7 +363,7 @@ conn_req_set_max_cm_retries( p_req->offset51 = (retries << 4); } -static inline void +static inline ib_api_status_t conn_req_set_pdata( IN const uint8_t* const p_data OPTIONAL, IN const uint8_t data_len, @@ -372,14 +373,17 @@ conn_req_set_pdata( if( p_data ) { + if( data_len > IB_REQ_PDATA_SIZE ) + return IB_INVALID_SETTING; + cl_memcpy( p_req->pdata, p_data, data_len ); - cl_memclr( p_req->pdata + data_len, - IB_REQ_PDATA_SIZE - data_len ); + cl_memclr( p_req->pdata + data_len, IB_REQ_PDATA_SIZE - data_len ); } else { cl_memclr( p_req->pdata, IB_REQ_PDATA_SIZE ); } + return IB_SUCCESS; } static inline void @@ -457,9 +461,9 @@ conn_req_path_set_subn_lcl( IN OUT req_path_info_t* const p_path ) { if( subn_lcl ) - p_path->offset42 = (p_path->offset42 & 0xF0); - else p_path->offset42 = ((p_path->offset42 & 0xF0) | 0x08); + else + p_path->offset42 = (p_path->offset42 & 0xF0); } static inline uint8_t @@ -508,6 +512,7 @@ typedef struct _mad_cm_mra uint8_t pdata[IB_MRA_PDATA_SIZE]; } PACK_SUFFIX mad_cm_mra_t; +C_ASSERT( sizeof(mad_cm_mra_t) == MAD_BLOCK_SIZE ); #include @@ -547,11 +552,11 @@ conn_mra_set_pdata( IN const uint8_t data_len, IN OUT 
mad_cm_mra_t* const p_mra ) { - if( p_data && data_len > IB_MRA_PDATA_SIZE ) - return IB_INVALID_SETTING; - if( p_data ) { + if( data_len > IB_MRA_PDATA_SIZE ) + return IB_INVALID_SETTING; + cl_memcpy( p_mra->pdata, p_data, data_len ); cl_memclr( p_mra->pdata + data_len, IB_MRA_PDATA_SIZE - data_len ); } @@ -596,6 +601,7 @@ typedef struct _mad_cm_rej uint8_t pdata[IB_REJ_PDATA_SIZE]; } PACK_SUFFIX mad_cm_rej_t; +C_ASSERT( sizeof(mad_cm_rej_t) == MAD_BLOCK_SIZE ); #include @@ -650,14 +656,13 @@ conn_rej_set_pdata( IN const uint8_t data_len, IN OUT mad_cm_rej_t* const p_rej ) { - if( p_data && data_len > IB_REJ_PDATA_SIZE ) - return IB_INVALID_PARAMETER; - if( p_data ) { + if( data_len > IB_REJ_PDATA_SIZE ) + return IB_INVALID_SETTING; + cl_memcpy( p_rej->pdata, p_data, data_len ); - cl_memclr( p_rej->pdata + data_len, - IB_REJ_PDATA_SIZE - data_len ); + cl_memclr( p_rej->pdata + data_len, IB_REJ_PDATA_SIZE - data_len ); } else { @@ -707,6 +712,7 @@ typedef struct _mad_cm_rep uint8_t pdata[IB_REP_PDATA_SIZE]; } PACK_SUFFIX mad_cm_rep_t; +C_ASSERT( sizeof(mad_cm_rep_t) == MAD_BLOCK_SIZE ); #include @@ -818,23 +824,27 @@ conn_rep_set_rnr_retry_cnt( p_rep->offset27 = (rnr_retry_cnt << 5); } -static inline void +static inline ib_api_status_t conn_rep_set_pdata( IN const uint8_t* const p_data OPTIONAL, - IN const uint8_t rep_len, + IN const uint8_t data_len, IN OUT mad_cm_rep_t* const p_rep ) { CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + if( p_data ) { - cl_memcpy( p_rep->pdata, p_data, rep_len ); - cl_memclr( p_rep->pdata + rep_len, - IB_REP_PDATA_SIZE - rep_len ); + if( data_len > IB_REP_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_rep->pdata, p_data, data_len ); + cl_memclr( p_rep->pdata + data_len, IB_REP_PDATA_SIZE - data_len ); } else { cl_memclr( p_rep->pdata, IB_REP_PDATA_SIZE ); } + return IB_SUCCESS; } static inline void @@ -863,24 +873,29 @@ typedef struct _mad_cm_rtu uint8_t pdata[IB_RTU_PDATA_SIZE]; } PACK_SUFFIX mad_cm_rtu_t; 
+C_ASSERT( sizeof(mad_cm_rtu_t) == MAD_BLOCK_SIZE ); #include -static inline void +static inline ib_api_status_t conn_rtu_set_pdata( IN const uint8_t* const p_data OPTIONAL, - IN const uint8_t rtu_len, + IN const uint8_t data_len, IN OUT mad_cm_rtu_t* const p_rtu ) { if( p_data ) { - cl_memcpy( p_rtu->pdata, p_data, rtu_len ); - cl_memclr( p_rtu->pdata + rtu_len, IB_RTU_PDATA_SIZE - rtu_len ); + if( data_len > IB_RTU_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_rtu->pdata, p_data, data_len ); + cl_memclr( p_rtu->pdata + data_len, IB_RTU_PDATA_SIZE - data_len ); } else { cl_memclr( p_rtu->pdata, IB_RTU_PDATA_SIZE ); } + return IB_SUCCESS; } /* DREQ */ @@ -900,6 +915,7 @@ typedef struct _mad_cm_dreq uint8_t pdata[IB_DREQ_PDATA_SIZE]; } PACK_SUFFIX mad_cm_dreq_t; +C_ASSERT( sizeof(mad_cm_dreq_t) == MAD_BLOCK_SIZE ); #include @@ -918,22 +934,25 @@ conn_dreq_set_remote_qpn( __set_low24( &p_dreq->offset8, qpn ); } -static inline void +static inline ib_api_status_t conn_dreq_set_pdata( IN const uint8_t* const p_data OPTIONAL, - IN const uint8_t dreq_len, + IN const uint8_t data_len, IN OUT mad_cm_dreq_t* const p_dreq ) { if( p_data ) { - cl_memcpy( p_dreq->pdata, p_data, dreq_len ); - cl_memclr( p_dreq->pdata + dreq_len, - IB_DREQ_PDATA_SIZE - dreq_len ); + if( data_len > IB_DREQ_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_dreq->pdata, p_data, data_len ); + cl_memclr( p_dreq->pdata + data_len, IB_DREQ_PDATA_SIZE - data_len ); } else { cl_memclr( p_dreq->pdata, IB_DREQ_PDATA_SIZE ); } + return IB_SUCCESS; } static inline void @@ -960,25 +979,29 @@ typedef struct _mad_cm_drep uint8_t pdata[IB_DREP_PDATA_SIZE]; } PACK_SUFFIX mad_cm_drep_t; +C_ASSERT( sizeof(mad_cm_drep_t) == MAD_BLOCK_SIZE ); #include -static inline void +static inline ib_api_status_t conn_drep_set_pdata( IN const uint8_t* const p_data OPTIONAL, - IN const uint8_t drep_len, + IN const uint8_t data_len, IN OUT mad_cm_drep_t* const p_drep ) { if( p_data ) { - cl_memcpy( 
p_drep->pdata, p_data, drep_len ); - cl_memclr( p_drep->pdata + drep_len, - IB_DREP_PDATA_SIZE - drep_len ); + if( data_len > IB_DREP_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_drep->pdata, p_data, data_len ); + cl_memclr( p_drep->pdata + data_len, IB_DREP_PDATA_SIZE - data_len ); } else { cl_memclr( p_drep->pdata, IB_DREP_PDATA_SIZE ); } + return IB_SUCCESS; } @@ -1021,6 +1044,7 @@ typedef struct _mad_cm_lap uint8_t pdata[IB_LAP_PDATA_SIZE]; } PACK_SUFFIX mad_cm_lap_t; +C_ASSERT( sizeof(mad_cm_lap_t) == MAD_BLOCK_SIZE ); #include @@ -1066,12 +1090,13 @@ conn_lap_set_pdata( IN OUT mad_cm_lap_t* const p_lap ) { CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); - if( p_data && data_len > IB_LAP_PDATA_SIZE ) - return IB_INVALID_PARAMETER; cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE ); if( p_data ) { + if( data_len > IB_LAP_PDATA_SIZE ) + return IB_INVALID_PARAMETER; + cl_memcpy( p_lap->pdata, p_data, data_len ); cl_memclr( p_lap->pdata + data_len, IB_LAP_PDATA_SIZE - data_len ); @@ -1221,6 +1246,7 @@ typedef struct _mad_cm_apr uint8_t pdata[IB_APR_PDATA_SIZE]; } PACK_SUFFIX mad_cm_apr_t; +C_ASSERT( sizeof(mad_cm_apr_t) == MAD_BLOCK_SIZE ); #include @@ -1256,11 +1282,11 @@ conn_apr_set_pdata( IN OUT mad_cm_apr_t* const p_apr ) { CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 ); - if( p_data && ( data_len > IB_APR_PDATA_SIZE ) ) - return IB_INVALID_PARAMETER; - if( p_data ) { + if( data_len > IB_APR_PDATA_SIZE ) + return IB_INVALID_PARAMETER; + cl_memcpy( p_apr->pdata, p_data, data_len ); cl_memclr( p_apr->pdata + data_len, IB_APR_PDATA_SIZE - data_len ); diff --git a/trunk/core/al/al_cm_qp.c b/trunk/core/al/al_cm_qp.c new file mode 100644 index 00000000..93570efc --- /dev/null +++ b/trunk/core/al/al_cm_qp.c @@ -0,0 +1,2014 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include "al.h" +#include "al_qp.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_mgr.h" +#include "al_debug.h" + + +typedef struct _al_listen +{ + al_obj_t obj; + net32_t cid; + + ib_pfn_cm_req_cb_t pfn_cm_req_cb; + + /* valid for ud qp_type only */ + const void* __ptr64 sidr_context; + +} al_listen_t; + + +#ifdef CL_KERNEL + +/* + * Structure for queuing received MADs to the asynchronous processing + * manager. + */ +typedef struct _cep_async_mad +{ + cl_async_proc_item_t item; + ib_al_handle_t h_al; + ib_cep_t cep; + +} cep_async_mad_t; + +#endif /* CL_KERNEL */ + + +/* + * Transition the QP to the error state to flush all oustanding work + * requests and sets the timewait time. 
This function may be called + * when destroying the QP in order to flush all work requests, so we + * cannot call through the main API, or the call will fail since the + * QP is no longer in the initialize state. + */ +static void +__cep_timewait_qp( + IN const ib_qp_handle_t h_qp ) +{ + uint64_t timewait = 0; + ib_qp_mod_t qp_mod; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* + * The CM should have set the proper timewait time-out value. Reset + * the QP and let it enter the timewait state. + */ + if( al_cep_get_timewait( h_qp->obj.h_al, + ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS ) + { + /* Special checks on the QP state for error handling - see above. */ + if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) || + ( (h_qp->obj.state != CL_INITIALIZED) && + (h_qp->obj.state != CL_DESTROYING) ) ) + { + AL_TRACE_EXIT( AL_DBG_CM, ("IB_INVALID_QP_HANDLE\n") ); + return; + } + + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_ERROR; + + /* Modify to error state using function pointers - see above. */ + status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("pfn_modify_qp to IB_QPS_ERROR returned %s\n", + ib_get_err_str( status )) ); + return; + } + +#ifdef CL_KERNEL + /* Store the timestamp after which the QP exits timewait. */ + h_qp->timewait = cl_get_time_stamp() + timewait; +#endif /* CL_KERNEL */ + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_path_rec( + IN const mad_cm_req_t* const p_req, + IN const req_path_info_t* const p_path, + OUT ib_path_rec_t* const p_path_rec ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_path ); + CL_ASSERT( p_path_rec ); + + /* + * Format a local path record. The local ack timeout specified in the + * REQ is twice the packet life plus the sender's CA ACK delay. 
When + * reporting the packet life, we divide the local ack timeout by 2 to + * approach the path's packet lifetime. Since local ack timeout is + * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the + * time in half. + */ + ib_path_rec_init_local( p_path_rec, + &p_path->local_gid, + &p_path->remote_gid, + p_path->local_lid, + p_path->remote_lid, + 1, p_req->pkey, + conn_req_path_get_svc_lvl( p_path ), + IB_PATH_SELECTOR_EXACTLY, conn_req_get_mtu( p_req ), + IB_PATH_SELECTOR_EXACTLY, + conn_req_path_get_pkt_rate( p_path ), + IB_PATH_SELECTOR_EXACTLY, + (uint8_t)( conn_req_path_get_lcl_ack_timeout( p_path ) - 1 ), + 0 ); + + p_path_rec->hop_flow_raw.val = 0; + /* Add global routing info as necessary. */ + if( !conn_req_path_get_subn_lcl( p_path ) ) + { + ib_path_rec_set_hop_flow_raw( p_path_rec, p_path->hop_limit, + conn_req_path_get_flow_lbl( p_path ), FALSE ); + p_path_rec->tclass = p_path->traffic_class; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_rec( + IN const mad_cm_req_t* const p_req, + OUT ib_cm_req_rec_t *p_req_rec ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_req_rec ); + + cl_memclr( p_req_rec, sizeof(ib_cm_req_rec_t) ); + + /* format version specific data */ + p_req_rec->p_req_pdata = p_req->pdata; + + p_req_rec->qp_type = conn_req_get_qp_type( p_req ); + + p_req_rec->resp_res = conn_req_get_resp_res( p_req ); + p_req_rec->flow_ctrl = conn_req_get_flow_ctrl( p_req ); + p_req_rec->rnr_retry_cnt = conn_req_get_rnr_retry_cnt( p_req ); + + __format_req_path_rec( p_req, &p_req->primary_path, + &p_req_rec->primary_path ); + __format_req_path_rec( p_req, &p_req->alternate_path, + &p_req_rec->alt_path ); + + /* These values are filled in later based on listen or peer connections + p_req_rec->context = ; + p_req_rec->h_cm_req = ; + p_req_rec->h_cm_listen = ; + */ + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle 
incoming REQs that matched to an outstanding listen. +* +*/ + + +static void +__listen_req( + IN al_listen_t* const p_listen, + IN const ib_cep_t* const p_new_cep, + IN const mad_cm_req_t* const p_req ) +{ + ib_cm_req_rec_t req_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_listen ); + CL_ASSERT( p_new_cep ); + CL_ASSERT( p_req ); + + /* Format the callback record. */ + __format_req_rec( p_req, &req_rec ); + + /* update listen based rec */ + req_rec.context = p_listen->obj.context; + + req_rec.h_cm_req.cid = p_new_cep->cid; + req_rec.h_cm_req.h_al = p_listen->obj.h_al; + req_rec.h_cm_req.h_qp = p_new_cep->context; + + req_rec.h_cm_listen = p_listen; + + /* Invoke the user's callback. */ + p_listen->pfn_cm_req_cb( &req_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_listen( + IN al_listen_t* const p_listen, + IN ib_cep_t* const p_new_cep, + IN const ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Context is a listen - MAD must be a REQ or SIDR REQ */ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + __listen_req( + p_listen, p_new_cep, (mad_cm_req_t*)p_mad ); + break; + + case CM_SIDR_REQ_ATTR_ID: + /* TODO - implement SIDR. */ + default: + CL_ASSERT( p_mad->attr_id == CM_REQ_ATTR_ID || + p_mad->attr_id == CM_SIDR_REQ_ATTR_ID ); + /* Destroy the new CEP as it won't ever be reported to the user. */ + al_destroy_cep( p_listen->obj.h_al, p_new_cep->cid, NULL ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle send timeouts: +* +*/ + +/* + * callback to process a connection establishment timeout due to reply not + * being received. The connection object has a reference + * taken when the timer is set or when the send is sent. 
+ */ +static void +__proc_conn_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_rej_rec_t rej_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* + * Format the reject record before aborting the connection since + * we need the QP context. + */ + cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) ); + rej_rec.h_qp = h_qp; + rej_rec.qp_context = h_qp->obj.context; + rej_rec.rej_status = IB_REJ_TIMEOUT; + + ref_al_obj( &h_qp->obj ); + + /* Unbind the QP from the CEP. */ + __cep_timewait_qp( h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + + /* Invoke the callback. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_rej_cb( &rej_rec ); + + if( cid == AL_INVALID_CID || + al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * callback to process a LAP timeout due to APR not being received. + */ +static void +__proc_lap_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_apr_rec_t apr_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* Report the timeout. */ + cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) ); + apr_rec.h_qp = h_qp; + apr_rec.qp_context = h_qp->obj.context; + apr_rec.cm_status = IB_TIMEOUT; + apr_rec.apr_status = IB_AP_REJECT; + + /* Notify the user that the LAP failed. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_apr_cb( &apr_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Callback to process a disconnection timeout due to not receiving the DREP + * within allowable time. + */ +static void +__proc_dconn_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_drep_rec_t drep_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + /* No response. We're done. Deliver a DREP callback. 
*/ + cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) ); + drep_rec.h_qp = h_qp; + drep_rec.qp_context = h_qp->obj.context; + drep_rec.cm_status = IB_TIMEOUT; + + ref_al_obj( &h_qp->obj ); + + __cep_timewait_qp( h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + + /* Call the user back. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_drep_cb( &drep_rec ); + + if( cid == AL_INVALID_CID || + al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_failed_send( + IN ib_qp_handle_t h_qp, + IN const ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Failure indicates a send. */ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + case CM_REP_ATTR_ID: + __proc_conn_timeout( h_qp ); + break; + case CM_LAP_ATTR_ID: + __proc_lap_timeout( h_qp ); + break; + case CM_DREQ_ATTR_ID: + __proc_dconn_timeout( h_qp ); + break; + default: + AL_TRACE( AL_DBG_ERROR, + ("Invalid CM send MAD attribute ID %d.\n", p_mad->attr_id) ); + break; + } + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle received MADs on a connection (not listen) +* +*/ + + +void +__proc_peer_req( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_req_t* const p_req ) +{ + ib_cm_req_rec_t req_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm ); + CL_ASSERT( p_cm->h_qp ); + /* Must be peer-to-peer. */ + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb ); + CL_ASSERT( p_req ); + + /* Format the callback record. */ + __format_req_rec( p_req, &req_rec ); + + /* update peer based rec handles and context values */ + req_rec.context = p_cm->h_qp->obj.context; + req_rec.h_cm_req = *p_cm; + req_rec.h_cm_listen = NULL; + + /* Invoke the user's callback. User must call ib_cm_rep or ib_cm_rej. 
*/ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb( &req_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_mra( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_mra_t* const p_mra ) +{ + ib_cm_mra_rec_t mra_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm->h_qp ); + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb ); + + /* Format the MRA callback record. */ + cl_memclr( &mra_rec, sizeof(ib_cm_mra_rec_t) ); + + mra_rec.h_qp = p_cm->h_qp; + mra_rec.qp_context = p_cm->h_qp->obj.context; + mra_rec.p_mra_pdata = p_mra->pdata; + + /* + * Call the user back. Note that users will get a callback only + * for the first MRA received in response to a REQ, REP, or LAP. + */ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb( &mra_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_rej( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_rej_t* const p_rej ) +{ + ib_cm_rej_rec_t rej_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( p_cm->h_qp ) + { + /* Format the REJ callback record. */ + cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) ); + + rej_rec.h_qp = p_cm->h_qp; + rej_rec.qp_context = p_cm->h_qp->obj.context; + + rej_rec.p_rej_pdata = p_rej->pdata; + rej_rec.p_ari = p_rej->ari; + rej_rec.ari_length = conn_rej_get_ari_len( p_rej ); + rej_rec.rej_status = p_rej->reason; + + ref_al_obj( &p_cm->h_qp->obj ); + + /* + * Unbind the QP from the connection object. This allows the QP to + * be immediately reused in another connection request. + */ + __cep_timewait_qp( p_cm->h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID ); + CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID ); + if( cid == AL_INVALID_CID || + al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm->h_qp->obj ); + } + + /* Call the user back. 
*/ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rej_cb( &rej_rec ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_rep( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_rep_t* const p_rep ) +{ + ib_cm_rep_rec_t rep_rec; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &rep_rec, sizeof(ib_cm_rep_rec_t) ); + + /* fill the rec callback data */ + rep_rec.p_rep_pdata = p_rep->pdata; + rep_rec.qp_type = p_cm->h_qp->type; + + rep_rec.h_cm_rep = *p_cm; + rep_rec.qp_context = p_cm->h_qp->obj.context; + rep_rec.resp_res = p_rep->resp_resources; + rep_rec.flow_ctrl = conn_rep_get_e2e_flow_ctl( p_rep ); + rep_rec.apr_status = conn_rep_get_failover( p_rep ); + + /* Notify the user of the reply. */ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rep_cb( &rep_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_rtu( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_rtu_t* const p_rtu ) +{ + ib_cm_rtu_rec_t rtu_rec; + + AL_ENTER( AL_DBG_CM ); + + rtu_rec.p_rtu_pdata = p_rtu->pdata; + rtu_rec.h_qp = p_cm->h_qp; + rtu_rec.qp_context = p_cm->h_qp->obj.context; + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rtu_cb( &rtu_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_dreq( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_dreq_t* const p_dreq ) +{ + ib_cm_dreq_rec_t dreq_rec; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &dreq_rec, sizeof(ib_cm_dreq_rec_t) ); + + dreq_rec.h_cm_dreq = *p_cm; + dreq_rec.p_dreq_pdata = p_dreq->pdata; + + dreq_rec.qp_context = p_cm->h_qp->obj.context; + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_dreq_cb( &dreq_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_drep( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_drep_t* const p_drep ) +{ + ib_cm_drep_rec_t drep_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) ); + + /* Copy qp context before the connection is released */ + drep_rec.cm_status = IB_SUCCESS; + drep_rec.p_drep_pdata = p_drep->pdata; + drep_rec.h_qp = p_cm->h_qp; + drep_rec.qp_context = p_cm->h_qp->obj.context; + + 
ref_al_obj( &p_cm->h_qp->obj ); + + __cep_timewait_qp( p_cm->h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + CL_ASSERT( cid == p_cm->cid ); + + if( al_destroy_cep( + p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm->h_qp->obj ); + } + } + else + { + deref_al_obj( &p_cm->h_qp->obj ); + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_lap( + IN ib_cm_handle_t* const p_cm, + IN const mad_cm_lap_t* const p_lap ) +{ + ib_cm_lap_rec_t lap_rec; + const lap_path_info_t* const p_path = &p_lap->alternate_path; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm ); + CL_ASSERT( p_cm->h_qp ); + CL_ASSERT( p_lap ); + + cl_memclr( &lap_rec, sizeof(ib_cm_lap_rec_t) ); + lap_rec.qp_context = p_cm->h_qp->obj.context; + lap_rec.h_cm_lap = *p_cm; + + /* + * Format the path record. The local ack timeout specified in the + * LAP is twice the packet life plus the sender's CA ACK delay. When + * reporting the packet life, we divide the local ack timeout by 2 to + * approach the path's packet lifetime. Since local ack timeout is + * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the + * time in half. + */ + ib_path_rec_init_local( &lap_rec.alt_path, + &p_lap->alternate_path.local_gid, + &p_lap->alternate_path.remote_gid, + p_lap->alternate_path.local_lid, + p_lap->alternate_path.remote_lid, + 1, IB_DEFAULT_PKEY, + conn_lap_path_get_svc_lvl( &p_lap->alternate_path ), + IB_PATH_SELECTOR_EXACTLY, + IB_MTU_2048, + IB_PATH_SELECTOR_EXACTLY, + conn_lap_path_get_pkt_rate( p_path ), + IB_PATH_SELECTOR_EXACTLY, + (uint8_t)( conn_lap_path_get_lcl_ack_timeout( p_path ) - 1 ), + 0 ); + + lap_rec.alt_path.hop_flow_raw.val = 0; + /* Add global routing info as necessary. 
*/ + if( !conn_lap_path_get_subn_lcl( &p_lap->alternate_path ) ) + { + ib_path_rec_set_hop_flow_raw( &lap_rec.alt_path, + p_lap->alternate_path.hop_limit, + conn_lap_path_get_flow_lbl( &p_lap->alternate_path ), + FALSE ); + lap_rec.alt_path.tclass = + conn_lap_path_get_tclass( &p_lap->alternate_path ); + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_lap_cb( &lap_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__cep_lap_qp( + IN ib_cm_handle_t* const p_cm ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + status = al_cep_get_rts_attr( p_cm->h_al, p_cm->cid, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("al_cep_get_rts_attr returned %s.\n", ib_get_err_str(status)) ); + goto done; + } + + status = ib_modify_qp( p_cm->h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("ib_modify_qp for LAP returned %s.\n", ib_get_err_str(status)) ); + } + +done: + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__proc_apr( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_apr_t* const p_apr ) +{ + ib_cm_apr_rec_t apr_rec; + + AL_ENTER( AL_DBG_CM ); + + apr_rec.h_qp = p_cm->h_qp; + apr_rec.qp_context = p_cm->h_qp->obj.context; + apr_rec.p_info = (const uint8_t*)&p_apr->info; + apr_rec.info_length = p_apr->info_len; + apr_rec.p_apr_pdata = p_apr->pdata; + apr_rec.apr_status = p_apr->status; + + if( apr_rec.apr_status == IB_AP_SUCCESS ) + { + apr_rec.cm_status = __cep_lap_qp( p_cm ); + } + else + { + apr_rec.cm_status = IB_ERROR; + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_apr_cb( &apr_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_conn( + IN ib_cm_handle_t* const p_cm, + IN ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Success indicates a receive. 
*/ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + __proc_peer_req( p_cm, (mad_cm_req_t*)p_mad ); + break; + + case CM_MRA_ATTR_ID: + __proc_mra( p_cm, (mad_cm_mra_t*)p_mad ); + break; + + case CM_REJ_ATTR_ID: + __proc_rej( p_cm, (mad_cm_rej_t*)p_mad ); + break; + + case CM_REP_ATTR_ID: + __proc_rep( p_cm, (mad_cm_rep_t*)p_mad ); + break; + + case CM_RTU_ATTR_ID: + __proc_rtu( p_cm, (mad_cm_rtu_t*)p_mad ); + break; + + case CM_DREQ_ATTR_ID: + __proc_dreq( p_cm, (mad_cm_dreq_t*)p_mad ); + break; + + case CM_DREP_ATTR_ID: + __proc_drep( p_cm, (mad_cm_drep_t*)p_mad ); + break; + + case CM_LAP_ATTR_ID: + __proc_lap( p_cm, (mad_cm_lap_t*)p_mad ); + break; + + case CM_APR_ATTR_ID: + __proc_apr( p_cm, (mad_cm_apr_t*)p_mad ); + break; + + //case CM_SIDR_REQ_ATTR_ID: + // p_async_mad->item.pfn_callback = __process_cm_sidr_req; + // break; + + //case CM_SIDR_REP_ATTR_ID: + // p_async_mad->item.pfn_callback = __process_cm_sidr_rep; + // break; + + default: + AL_TRACE( AL_DBG_ERROR, + ("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) ); + } + + AL_EXIT( AL_DBG_CM ); +} + +/****************************************************************************** +* CEP callback handler. +* +*/ + +#ifdef CL_KERNEL +static void +__process_cep_cb( +#else +static void +__cm_handler( +#endif + IN const ib_al_handle_t h_al, + IN ib_cep_t* const p_cep ) +{ + ib_api_status_t status; + ib_cep_t new_cep; + ib_mad_element_t *p_mad; + ib_cm_handle_t h_cm; + + AL_ENTER( AL_DBG_CM ); + + for( status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ); + status == IB_SUCCESS; + status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ) ) + { + /* Something to do - WOOT!!! */ + if( new_cep.cid != AL_INVALID_CID ) + { + __proc_listen( (al_listen_t*)p_cep->context, + &new_cep, ib_get_mad_buf( p_mad ) ); + } + else if( p_mad->status != IB_SUCCESS ) + { + /* Context is a QP handle, and a sent MAD timed out. 
*/ + __proc_failed_send( + (ib_qp_handle_t)p_cep->context, ib_get_mad_buf( p_mad ) ); + } + else + { + h_cm.h_al = h_al; + h_cm.cid = p_cep->cid; + h_cm.h_qp = (ib_qp_handle_t)p_cep->context; + __proc_conn( &h_cm, ib_get_mad_buf( p_mad ) ); + } + ib_put_mad( p_mad ); + } +} + + +#ifdef CL_KERNEL + +static void +__process_cep_async( + IN cl_async_proc_item_t *p_item ) +{ + cep_async_mad_t *p_async_mad; + + AL_ENTER( AL_DBG_CM ); + + p_async_mad = PARENT_STRUCT( p_item, cep_async_mad_t, item ); + + __process_cep_cb( p_async_mad->h_al, &p_async_mad->cep ); + + cl_free( p_async_mad ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * The handler is invoked at DISPATCH_LEVEL in kernel mode. We need to switch + * to a passive level thread context to perform QP modify and invoke user + * callbacks. + */ +static void +__cm_handler( + IN const ib_al_handle_t h_al, + IN ib_cep_t* const p_cep ) +{ + cep_async_mad_t *p_async_mad; + + AL_ENTER( AL_DBG_CM ); + + p_async_mad = (cep_async_mad_t*)cl_zalloc( sizeof(cep_async_mad_t) ); + if( !p_async_mad ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("failed to cl_zalloc cm_async_mad_t (%d bytes)\n", + sizeof(cep_async_mad_t)) ); + return; + } + + p_async_mad->h_al = h_al; + p_async_mad->cep = *p_cep; + p_async_mad->item.pfn_callback = __process_cep_async; + + /* Queue the MAD for asynchronous processing. */ + cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item ); + + AL_EXIT( AL_DBG_CM ); +} +#endif /* CL_KERNEL */ + + +/* + * Transition the QP to the INIT state, if it is not already in the + * INIT state. + */ +ib_api_status_t +__cep_init_qp( + IN const ib_qp_handle_t h_qp, + IN ib_qp_mod_t* const p_init ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t status; + + /* + * Move to the init state to allow posting of receive buffers. + * Chech the current state of the QP. The user may have already + * transitioned it and posted some receives to the QP, so we + * should not reset the QP if it is already in the INIT state. 
+ */ + if( h_qp->state != IB_QPS_INIT ) + { + /* Reset the QP. */ + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_RESET; + + status = ib_modify_qp( h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("ib_modify_qp to IB_QPS_RESET returned %s\n", + ib_get_err_str(status) ) ); + } + + /* Initialize the QP. */ + status = ib_modify_qp( h_qp, p_init ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("ib_modify_qp returned %s.\n", ib_get_err_str(status) ) ); + return status; + } + } + + return IB_SUCCESS; +} + +static ib_api_status_t +__cep_pre_req( + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + status = al_cep_pre_req( qp_get_al( p_cm_req->h_qp ), + ((al_conn_qp_t*)p_cm_req->h_qp)->cid, p_cm_req, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + /* Transition QP through state machine */ + /* + * Warning! Using all access rights. We need to modify + * the ib_cm_req_t to include this. 
+ */ + qp_mod.state.init.access_ctrl |= + IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_ATOMIC; + status = __cep_init_qp( p_cm_req->h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("__cep_init_qp returned %s\n", ib_get_err_str(status)) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__cep_conn_req( + IN const ib_al_handle_t h_al, + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + //cl_status_t cl_status; + //cl_event_t sync_event; + //cl_event_t *p_sync_event = NULL; + al_conn_qp_t *p_qp; + net32_t cid, old_cid; + + AL_ENTER( AL_DBG_CM ); + + /* event based mechanism */ + if( p_cm_req->flags & IB_FLAGS_SYNC ) + { + AL_EXIT( AL_DBG_CM ); + return IB_UNSUPPORTED; + //cl_event_construct( &sync_event ); + //cl_status = cl_event_init( &sync_event, FALSE ); + //if( cl_status != CL_SUCCESS ) + //{ + // __deref_conn( p_conn ); + // return ib_convert_cl_status( cl_status ); + //} + //p_conn->p_sync_event = p_sync_event = &sync_event; + } + + p_qp = (al_conn_qp_t*)p_cm_req->h_qp; + + /* Get a CEP and bind it to the QP. */ + status = al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("al_create_cep returned %s.\n", ib_get_err_str( status )) ); + goto done; + } + + /* See if this QP has already been connected. */ + old_cid = cl_atomic_comp_xchg( &p_qp->cid, AL_INVALID_CID, cid ); + if( old_cid != AL_INVALID_CID ) + { + al_destroy_cep( h_al, cid, NULL ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_STATE; + } + + status = __cep_pre_req( p_cm_req ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("__cep_pre_req returned %s.\n", ib_get_err_str( status )) ); + goto err; + } + + /* Store callback pointers. 
*/ + p_qp->pfn_cm_req_cb = p_cm_req->pfn_cm_req_cb; + p_qp->pfn_cm_rep_cb = p_cm_req->pfn_cm_rep_cb; + p_qp->pfn_cm_mra_cb = p_cm_req->pfn_cm_mra_cb; + p_qp->pfn_cm_rej_cb = p_cm_req->pfn_cm_rej_cb; + + /* Send the REQ. */ + status = al_cep_send_req( h_al, p_qp->cid ); + if( status != IB_SUCCESS ) + { + //if( p_sync_event ) + // cl_event_destroy( p_sync_event ); + + AL_TRACE( AL_DBG_ERROR, + ("al_cep_send_req returned %s.\n", ib_get_err_str(status)) ); +err: + ref_al_obj( &p_qp->qp.obj ); + cl_atomic_xchg( &p_qp->cid, AL_INVALID_CID ); + if( al_destroy_cep( h_al, cid, deref_al_obj ) != IB_SUCCESS ) + deref_al_obj( &p_qp->qp.obj ); + } + + /* wait on event if synchronous operation */ + //if( p_sync_event ) + //{ + // CL_TRACE( AL_DBG_CM, g_al_dbg_lvl, + // ("event blocked on REQ...\n") ); + // cl_event_wait_on( p_sync_event, EVENT_NO_TIMEOUT, FALSE ); + + // cl_event_destroy( p_sync_event ); + //} + +done: + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_req( + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_req ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_req->qp_type ) + { + default: + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_req->h_qp->type != p_cm_req->qp_type) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + + status = __cep_conn_req( qp_get_al( p_cm_req->h_qp ), p_cm_req ); + break; + + case IB_QPT_UNRELIABLE_DGRM: + if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + status = IB_UNSUPPORTED; +// status = cm_sidr_req( 
p_cm_req->h_al, p_cm_req ); + break; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +/* + * Note: we pass in the QP handle separately because it comes form different + * sources. It comes from the ib_cm_rep_t structure in the ib_cm_rep path, and + * from the ib_cm_handle_t structure in the ib_cm_rtu path. + */ +static ib_api_status_t +__cep_rts_qp( + IN const ib_cm_handle_t h_cm, + IN const ib_qp_handle_t h_qp, + IN const ib_access_t access_ctrl, + IN const uint32_t sq_depth, + IN const uint32_t rq_depth ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + /* Set the QP to RTR. */ + status = al_cep_get_rtr_attr( h_cm.h_al, h_cm.cid, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) ); + return status; + } + + if( access_ctrl ) + { + qp_mod.state.rtr.access_ctrl = access_ctrl; + qp_mod.state.rtr.opts |= IB_MOD_QP_ACCESS_CTRL; + } + + if( sq_depth ) + { + qp_mod.state.rtr.sq_depth = sq_depth; + qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH; + } + + if( rq_depth ) + { + qp_mod.state.rtr.rq_depth = rq_depth; + qp_mod.state.rtr.opts |= IB_MOD_QP_RQ_DEPTH; + } + + status = ib_modify_qp( h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) ); + return status; + } + + /* Set the QP to RTS. 
*/ + status = al_cep_get_rts_attr( h_cm.h_al, h_cm.cid, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) ); + return status; + } + + status = ib_modify_qp( h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__cep_pre_rep( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + ib_api_status_t status; + al_conn_qp_t *p_qp; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + p_qp = (al_conn_qp_t*)p_cm_rep->h_qp; + + status = al_cep_pre_rep( + h_cm.h_al, h_cm.cid, p_cm_rep->h_qp, p_cm_rep, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_pre_rep returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + /* Transition the QP to the INIT state. */ + qp_mod.state.init.access_ctrl = p_cm_rep->access_ctrl; + status = __cep_init_qp( p_cm_rep->h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("cm_init_qp returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Prepost receives. */ + if( p_cm_rep->p_recv_wr ) + { + status = ib_post_recv( p_cm_rep->h_qp, p_cm_rep->p_recv_wr, + (ib_recv_wr_t** __ptr64)p_cm_rep->pp_recv_failure ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_post_recv returned %s.\n", ib_get_err_str(status)) ); + return status; + } + } + + /* Transition the QP to the RTR and RTS states. 
*/ + status = __cep_rts_qp( h_cm, p_cm_rep->h_qp, + p_cm_rep->access_ctrl, p_cm_rep->sq_depth, p_cm_rep->rq_depth ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("__cep_rts_qp returned %s.\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__cep_conn_rep( + IN ib_cm_handle_t h_cm, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + cid = cl_atomic_comp_xchg( + &((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID, h_cm.cid ); + + if( cid != AL_INVALID_CID ) + { + /* We don't destroy the CEP to allow the user to retry accepting. */ + AL_TRACE_EXIT( AL_DBG_ERROR, ("QP already connected.\n") ); + return IB_INVALID_QP_HANDLE; + } + + /* Store the CM callbacks. */ + ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rej_cb = p_cm_rep->pfn_cm_rej_cb; + ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_mra_cb = p_cm_rep->pfn_cm_mra_cb; + ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rtu_cb = p_cm_rep->pfn_cm_rtu_cb; + ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_lap_cb = p_cm_rep->pfn_cm_lap_cb; + ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_dreq_cb = p_cm_rep->pfn_cm_dreq_cb; + + /* Transition QP through state machine */ + status = __cep_pre_rep( h_cm, p_cm_rep ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("__cep_pre_req returned %s\n", ib_get_err_str(status)) ); + goto err; + } + + status = al_cep_send_rep( h_cm.h_al, h_cm.cid ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_send_rep returned %s\n", ib_get_err_str(status)) ); +err: + cl_atomic_xchg( + &((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID ); + + ref_al_obj( &p_cm_rep->h_qp->obj ); + + /* Reject and abort the connection. 
*/ + al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 ); + + if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS ) + deref_al_obj( &p_cm_rep->h_qp->obj ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_rep( + IN const ib_cm_handle_t h_cm_req, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_rep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + status = IB_SUCCESS; + switch( p_cm_rep->qp_type ) + { + default: + AL_TRACE( AL_DBG_ERROR, ("Invalid qp_type.\n") ); + status = IB_INVALID_SETTING; + break; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + status = IB_INVALID_QP_HANDLE; + } + if( p_cm_rep->h_qp->obj.h_al != h_cm_req.h_al ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + status = IB_INVALID_QP_HANDLE; + } + break; + + case IB_QPT_UNRELIABLE_DGRM: + if( ( p_cm_rep->status == IB_SIDR_SUCCESS ) && + (AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + status = IB_INVALID_QP_HANDLE; + } + break; + } + + if( status != IB_SUCCESS ) + { + al_cep_rej( + h_cm_req.h_al, h_cm_req.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 ); + al_destroy_cep( h_cm_req.h_al, h_cm_req.cid, NULL ); + + AL_EXIT( AL_DBG_CM ); + return status; + } + + if( p_cm_rep->qp_type == IB_QPT_UNRELIABLE_DGRM ) + status = IB_UNSUPPORTED;//status = cm_sidr_rep( p_conn, p_cm_rep ); + else + status = __cep_conn_rep( h_cm_req, p_cm_rep ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_rtu( + IN const ib_cm_handle_t h_cm_rep, + IN 
const ib_cm_rtu_t* const p_cm_rtu ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_rtu ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + ///* + // * Call invalid if event is still processed. + // * User may have called rtu in rep callback. + // */ + //if( p_conn->p_sync_event ) + //{ + // CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, + // ("Connection in invalid state. Sync call in progress.\n" ) ); + + // cm_res_release( p_conn ); + // __deref_conn( p_conn ); + // return IB_INVALID_STATE; + //} + ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_apr_cb = p_cm_rtu->pfn_cm_apr_cb; + ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_dreq_cb = p_cm_rtu->pfn_cm_dreq_cb; + + /* Transition QP through state machine */ + status = __cep_rts_qp( h_cm_rep, h_cm_rep.h_qp, + p_cm_rtu->access_ctrl, p_cm_rtu->sq_depth, p_cm_rtu->rq_depth ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("__cep_rts_qp returned %s.\n", ib_get_err_str( status )) ); + goto err; + } + + status = al_cep_rtu( h_cm_rep.h_al, h_cm_rep.cid, + p_cm_rtu->p_rtu_pdata, p_cm_rtu->rtu_length ); + if( status != IB_SUCCESS ) + { +err: + /* Reject and abort the connection. 
*/ + al_cep_rej( + h_cm_rep.h_al, h_cm_rep.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 ); + + __cep_timewait_qp( h_cm_rep.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm_rep.h_qp)->cid, AL_INVALID_CID ); + + CL_ASSERT( cid == h_cm_rep.cid ); + + ref_al_obj( &h_cm_rep.h_qp->obj ); + if( al_destroy_cep( + h_cm_rep.h_al, h_cm_rep.cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_cm_rep.h_qp->obj ); + } + + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_rtu returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_mra( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_mra ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_mra( h_cm.h_al, h_cm.cid, p_cm_mra ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("al_cep_mra returned %s\n", ib_get_err_str( status )) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_rej( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_rej_t* const p_cm_rej ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_rej ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_rej( h_cm.h_al, h_cm.cid, p_cm_rej->rej_status, + p_cm_rej->p_ari->data, p_cm_rej->ari_length, + p_cm_rej->p_rej_pdata, p_cm_rej->rej_length ); + + if( h_cm.h_qp ) + { + __cep_timewait_qp( h_cm.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + ref_al_obj( &h_cm.h_qp->obj ); + if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS ) + deref_al_obj( &h_cm.h_qp->obj ); + } + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_dreq( + IN const ib_cm_dreq_t* const p_cm_dreq ) +{ + 
ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_dreq ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_dreq->qp_type ) + { + default: + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_dreq->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_dreq->h_qp->type != p_cm_dreq->qp_type) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + /* Store the callback pointers. */ + ((al_conn_qp_t*)p_cm_dreq->h_qp)->pfn_cm_drep_cb = + p_cm_dreq->pfn_cm_drep_cb; + + status = al_cep_dreq( p_cm_dreq->h_qp->obj.h_al, + ((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, + p_cm_dreq->p_dreq_pdata, p_cm_dreq->dreq_length ); + switch( status ) + { + case IB_INVALID_STATE: + case IB_INVALID_HANDLE: + case IB_INVALID_PARAMETER: + case IB_INVALID_SETTING: + /* Bad call - don't touch the QP. */ + break; + + case IB_SUCCESS: + /* Wait for the DREP or timeout. */ + break; + + default: + /* + * If we failed to send the DREQ, just release the connection. It's + * unreliable anyway. The local port may be down. Note that we could + * not send the DREQ, but we still could have received one. The DREQ + * will have a reference on the connection until the user calls + * ib_cm_drep. 
+ */ + __cep_timewait_qp( p_cm_dreq->h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, AL_INVALID_CID ); + ref_al_obj( &p_cm_dreq->h_qp->obj ); + if( cid == AL_INVALID_CID || al_destroy_cep( + p_cm_dreq->h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm_dreq->h_qp->obj ); + } + status = IB_SUCCESS; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + + +ib_api_status_t +ib_cm_drep( + IN const ib_cm_handle_t h_cm_dreq, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_drep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_drep( h_cm_dreq.h_al, h_cm_dreq.cid, p_cm_drep ); + switch( status ) + { + case IB_INVALID_SETTING: + case IB_INVALID_HANDLE: + case IB_INVALID_PARAMETER: + case IB_INVALID_STATE: + /* Bad call - don't touch the QP. */ + break; + + default: + /* + * Some other out-of-resource error - continue as if we succeeded in + * sending the DREP. 
+ */ + status = IB_SUCCESS; + /* Fall through */ + case IB_SUCCESS: + __cep_timewait_qp( h_cm_dreq.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + CL_ASSERT( cid == h_cm_dreq.cid ); + ref_al_obj( &h_cm_dreq.h_qp->obj ); + if( al_destroy_cep( + h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_cm_dreq.h_qp->obj ); + } + } + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_lap( + IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_lap ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_lap->qp_type ) + { + default: + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_lap->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_lap->h_qp->type != p_cm_lap->qp_type) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + status = al_cep_lap( p_cm_lap->h_qp->obj.h_al, + ((al_conn_qp_t*)p_cm_lap->h_qp)->cid, p_cm_lap ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("al_cep_lap returned %s.\n", ib_get_err_str( status )) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_apr( + IN const ib_cm_handle_t h_cm_lap, + IN const ib_cm_apr_t* const p_cm_apr ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_apr ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_apr->qp_type ) + { + default: + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case 
IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_apr->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_apr->h_qp->type != p_cm_apr->qp_type) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + status = al_cep_pre_apr( h_cm_lap.h_al, h_cm_lap.cid, p_cm_apr, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_pre_apr returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + /* Load alt path into QP */ + status = ib_modify_qp( h_cm_lap.h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_modify_qp for LAP returned %s.\n", + ib_get_err_str( status )) ); + return status; + } + + status = al_cep_send_apr( h_cm_lap.h_al, h_cm_lap.cid ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_force_apm( + IN const ib_qp_handle_t h_qp ) +{ + ib_api_status_t status; + al_conn_qp_t *p_conn_qp; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + + p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp ); + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.apm_state = IB_APM_MIGRATED; + qp_mod.state.rts.opts = IB_MOD_QP_APM_STATE; + + /* Set the QP to RTS. */ + status = ib_modify_qp( h_qp, &qp_mod ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__destroying_listen( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + al_listen_t *p_listen; + + p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj ); + + /* Destroy the listen's CEP. 
*/ + status = al_destroy_cep( + p_obj->h_al, p_listen->cid, deref_al_obj ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("al_destroy_cep returned %s.\n", ib_get_err_str( status )) ); + deref_al_obj( p_obj ); + } +} + + + +static void +__free_listen( + IN al_obj_t* p_obj ) +{ + destroy_al_obj( p_obj ); + cl_free( PARENT_STRUCT( p_obj, al_listen_t, obj ) ); +} + + +static ib_api_status_t +__cep_listen( + IN const ib_al_handle_t h_al, + IN const ib_cm_listen_t* const p_cm_listen, + IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb, + IN const void* const listen_context, + OUT ib_listen_handle_t* const ph_cm_listen ) +{ + ib_api_status_t status; + al_listen_t *p_listen; + ib_cep_listen_t cep_listen; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( pfn_listen_err_cb ); + + /* Allocate the listen object. */ + p_listen = (al_listen_t*)cl_zalloc( sizeof(al_listen_t) ); + if( !p_listen ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Copy the listen request information for matching incoming requests. */ + p_listen->pfn_cm_req_cb = p_cm_listen->pfn_cm_req_cb; + + /* valid for ud qp_type only */ + p_listen->sidr_context = p_cm_listen->sidr_context; + + construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN ); + status = init_al_obj( &p_listen->obj, listen_context, TRUE, + __destroying_listen, NULL, __free_listen ); + if( status != IB_SUCCESS ) + { + __free_listen( &p_listen->obj ); + AL_EXIT( AL_DBG_CM ); + return status; + } + + /* Add the listen to the AL instance's object list. */ + status = attach_al_obj( &h_al->obj, &p_listen->obj ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Create a CEP to listen on. 
*/ + status = al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_create_cep returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + cep_listen.cmp_len = p_cm_listen->compare_length; + cep_listen.cmp_offset = p_cm_listen->compare_offset; + cep_listen.p_cmp_buf = p_cm_listen->p_compare_buffer; + cep_listen.port_guid = p_cm_listen->port_guid; + cep_listen.svc_id = p_cm_listen->svc_id; + + status = al_cep_listen( h_al, p_listen->cid, &cep_listen ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_cep_listen returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + *ph_cm_listen = p_listen; + + /* Note that we keep the reference held on behalf of the CEP. */ + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +ib_cm_listen( + IN const ib_al_handle_t h_al, + IN const ib_cm_listen_t* const p_cm_listen, + IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb, + IN const void* const listen_context, + OUT ib_listen_handle_t* const ph_cm_listen ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_cm_listen || !pfn_listen_err_cb || !ph_cm_listen ) + { + CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context, + ph_cm_listen ); + + CL_EXIT( AL_DBG_CM, g_al_dbg_lvl ); + return status; +} + + +ib_api_status_t +ib_cm_cancel( + IN const ib_listen_handle_t h_cm_listen, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_cm_listen, AL_OBJ_TYPE_H_LISTEN ) ) + { + 
CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &h_cm_listen->obj ); + h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +ib_cm_handoff( + IN const ib_cm_handle_t h_cm_req, + IN const ib_net64_t svc_id ) +{ + UNUSED_PARAM( h_cm_req ); + UNUSED_PARAM( svc_id ); + return IB_UNSUPPORTED; +} diff --git a/trunk/core/al/al_dev.h b/trunk/core/al/al_dev.h index 23264b38..4d27bd3d 100644 --- a/trunk/core/al/al_dev.h +++ b/trunk/core/al/al_dev.h @@ -318,11 +318,46 @@ typedef enum _al_cm_sidr_ops #define IS_CM_IOCTL(cmd) \ ((cmd) > AL_CM_OPS_START && (cmd) < AL_CM_MAXOPS) + +enum _ual_cep_ops +{ + al_cep_ops_start = al_ioc_maxops, + ual_create_cep, + ual_destroy_cep, + ual_cep_listen, + ual_cep_pre_req, + ual_cep_send_req, + ual_cep_pre_rep, + ual_cep_send_rep, + ual_cep_get_rtr, + ual_cep_get_rts, + ual_cep_rtu, + ual_cep_rej, + ual_cep_mra, + ual_cep_lap, + ual_cep_pre_apr, + ual_cep_send_apr, + ual_cep_dreq, + ual_cep_drep, + ual_cep_get_timewait, + ual_cep_get_event, + ual_cep_poll, + + al_cep_maxops + +} ual_cep_ops_t; + +#define UAL_CEP_OPS_START IOCTL_CODE(ALDEV_KEY, al_cep_ops_start) +#define UAL_CEP_MAXOPS IOCTL_CODE(ALDEV_KEY, al_cep_maxops) +#define IS_CEP_IOCTL(cmd) \ + ((cmd) > UAL_CEP_OPS_START && (cmd) < UAL_CEP_MAXOPS) + + /* AL ioctls */ typedef enum _al_dev_ops { - al_ops_start = al_cm_maxops, + al_ops_start = al_cep_maxops, ual_reg_shmid_cmd, ual_get_ca_attr, @@ -416,6 +451,28 @@ typedef enum _al_dev_ops #define UAL_CM_APR IOCTL_CODE(ALDEV_KEY, ual_cm_apr_cmd) #define UAL_CM_FORCE_APM IOCTL_CODE(ALDEV_KEY, ual_force_apm_cmd) +/* CEP Related IOCTL commands */ +#define UAL_CREATE_CEP IOCTL_CODE(ALDEV_KEY, ual_create_cep) +#define UAL_DESTROY_CEP IOCTL_CODE(ALDEV_KEY, ual_destroy_cep) +#define UAL_CEP_LISTEN IOCTL_CODE(ALDEV_KEY, ual_cep_listen) +#define UAL_CEP_PRE_REQ IOCTL_CODE(ALDEV_KEY, 
ual_cep_pre_req) +#define UAL_CEP_SEND_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_send_req) +#define UAL_CEP_PRE_REP IOCTL_CODE(ALDEV_KEY, ual_cep_pre_rep) +#define UAL_CEP_SEND_REP IOCTL_CODE(ALDEV_KEY, ual_cep_send_rep) +#define UAL_CEP_GET_RTR IOCTL_CODE(ALDEV_KEY, ual_cep_get_rtr) +#define UAL_CEP_GET_RTS IOCTL_CODE(ALDEV_KEY, ual_cep_get_rts) +#define UAL_CEP_RTU IOCTL_CODE(ALDEV_KEY, ual_cep_rtu) +#define UAL_CEP_REJ IOCTL_CODE(ALDEV_KEY, ual_cep_rej) +#define UAL_CEP_MRA IOCTL_CODE(ALDEV_KEY, ual_cep_mra) +#define UAL_CEP_LAP IOCTL_CODE(ALDEV_KEY, ual_cep_lap) +#define UAL_CEP_PRE_APR IOCTL_CODE(ALDEV_KEY, ual_cep_pre_apr) +#define UAL_CEP_SEND_APR IOCTL_CODE(ALDEV_KEY, ual_cep_send_apr) +#define UAL_CEP_DREQ IOCTL_CODE(ALDEV_KEY, ual_cep_dreq) +#define UAL_CEP_DREP IOCTL_CODE(ALDEV_KEY, ual_cep_drep) +#define UAL_CEP_GET_TIMEWAIT IOCTL_CODE(ALDEV_KEY, ual_cep_get_timewait) +#define UAL_CEP_GET_EVENT IOCTL_CODE(ALDEV_KEY, ual_cep_get_event) +#define UAL_CEP_POLL IOCTL_CODE(ALDEV_KEY, ual_cep_poll) + #define UAL_GET_CA_ATTR_INFO IOCTL_CODE(ALDEV_KEY, ual_get_ca_attr) /* PnP related ioctl commands. 
*/ diff --git a/trunk/core/al/al_dm.c b/trunk/core/al/al_dm.c index 09b53f24..b823705d 100644 --- a/trunk/core/al/al_dm.c +++ b/trunk/core/al/al_dm.c @@ -213,7 +213,7 @@ ib_create_ioc( CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") ); return IB_INVALID_CA_HANDLE; } - if( !p_ioc_profile || ph_ioc ) + if( !p_ioc_profile || !ph_ioc ) { CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") ); return IB_INVALID_PARAMETER; diff --git a/trunk/core/al/al_dm.h b/trunk/core/al/al_dm.h index 1dc97f94..f220dd87 100644 --- a/trunk/core/al/al_dm.h +++ b/trunk/core/al/al_dm.h @@ -91,6 +91,7 @@ typedef enum _ioc_state /* An IOC represents a slot in an IO unit */ } ioc_state_t; +#pragma warning(disable:4324) typedef struct _al_ioc { al_obj_t obj; /* Child of ib_ca_t */ @@ -104,6 +105,7 @@ typedef struct _al_ioc atomic32_t in_use_cnt; } al_ioc_t; +#pragma warning(default:4324) typedef struct _al_svc_entry diff --git a/trunk/core/al/al_mad.c b/trunk/core/al/al_mad.c index 362cf287..a909da90 100644 --- a/trunk/core/al/al_mad.c +++ b/trunk/core/al/al_mad.c @@ -129,10 +129,6 @@ static void __send_timer_cb( IN void *context ); -static void -__send_async_proc_cb( - IN cl_async_proc_item_t *p_send_async_item ); - static void __check_send_queue( IN ib_mad_svc_handle_t h_mad_svc ); @@ -1132,7 +1128,6 @@ reg_mad_svc( /* Construct the MAD service. 
*/ construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC ); - cl_async_proc_construct( &h_mad_svc->send_async_proc ); cl_timer_construct( &h_mad_svc->send_timer ); cl_timer_construct( &h_mad_svc->recv_timer ); cl_qlist_init( &h_mad_svc->send_list ); @@ -1195,14 +1190,6 @@ reg_mad_svc( return ib_convert_cl_status( cl_status ); } - cl_status = cl_async_proc_init( &h_mad_svc->send_async_proc, - 1, "MAD svc send timeout" ); - if( cl_status != CL_SUCCESS ) - { - h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); - return ib_convert_cl_status( cl_status ); - } - *ph_mad_svc = h_mad_svc; CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl ); @@ -1220,6 +1207,9 @@ __destroying_mad_svc( ib_mad_send_handle_t h_send; cl_list_item_t *p_list_item; int32_t timeout_ms; +#ifdef CL_KERNEL + KIRQL old_irql; +#endif CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl ); CL_ASSERT( p_obj ); @@ -1239,14 +1229,6 @@ __destroying_mad_svc( timeout_ms -= 10; } - /* - * Cancel all outstanding send requests. Stop the send timer to avoid - * synchronizing with it. - */ - cl_timer_stop( &h_mad_svc->send_timer ); - cl_async_proc_destroy( &h_mad_svc->send_async_proc ); - cl_timer_destroy( &h_mad_svc->send_timer ); - /* * Deregister from the MAD dispatcher. The MAD dispatcher holds * a reference on the MAD service when invoking callbacks. Since we @@ -1257,6 +1239,8 @@ __destroying_mad_svc( if( h_mad_svc->h_mad_reg ) __mad_disp_dereg( h_mad_svc->h_mad_reg ); + /* Cancel all outstanding send requests. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); for( p_list_item = cl_qlist_head( &h_mad_svc->send_list ); p_list_item != cl_qlist_end( &h_mad_svc->send_list ); p_list_item = cl_qlist_next( p_list_item ) ) @@ -1265,13 +1249,22 @@ __destroying_mad_svc( h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); h_send->canceled = TRUE; } + cl_spinlock_release( &h_mad_svc->obj.lock ); /* * Invoke the timer callback to return the canceled MADs to the user. 
* Since the MAD service is being destroyed, the user cannot be issuing * sends. */ +#ifdef CL_KERNEL + old_irql = KeRaiseIrqlToDpcLevel(); +#endif __check_send_queue( h_mad_svc ); +#ifdef CL_KERNEL + KeLowerIrql( old_irql ); +#endif + + cl_timer_destroy( &h_mad_svc->send_timer ); #ifdef CL_KERNEL /* @@ -1280,7 +1273,6 @@ __destroying_mad_svc( if( h_mad_svc->obj.h_al->p_context ) { cl_qlist_t *p_cblist; - cl_list_item_t *p_list_item; al_proxy_cb_info_t *p_cb_info; cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock ); @@ -1832,6 +1824,63 @@ ib_cancel_mad( } +ib_api_status_t +ib_delay_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element, + IN const uint32_t delay_ms ) +{ +#ifdef CL_KERNEL + cl_list_item_t *p_list_item; + ib_mad_send_handle_t h_send; +#endif + + AL_ENTER( AL_DBG_MAD_SVC ); + + if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + if( !p_mad_element ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + +#ifndef CL_KERNEL + UNUSED_PARAM( p_mad_element ); + UNUSED_PARAM( delay_ms ); + /* TODO: support for user-mode MAD QP's. */ + AL_EXIT( AL_DBG_MAD_SVC ); + return IB_UNSUPPORTED; +#else + /* Search for the MAD in our MAD list. It may have already completed. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list, + __mad_svc_find_send, p_mad_element ); + + if( !p_list_item ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_TRACE( AL_DBG_MAD_SVC, ("MAD not found\n") ); + return IB_NOT_FOUND; + } + + /* Mark the MAD as having been canceled. 
*/ + h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); + + if( h_send->retry_time == MAX_TIME ) + h_send->delay = delay_ms; + else + h_send->retry_time += ((uint64_t)delay_ms * 1000ULL); + + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_EXIT( AL_DBG_MAD_SVC ); + return IB_SUCCESS; +#endif +} + /* * Process a send completion. @@ -2880,8 +2929,10 @@ static __inline void __set_retry_time( IN ib_mad_send_handle_t h_send ) { - h_send->retry_time = h_send->p_send_mad->timeout_ms * 1000 + + h_send->retry_time = + (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL + cl_get_time_stamp(); + h_send->delay = 0; } @@ -2890,73 +2941,10 @@ static void __send_timer_cb( IN void *context ) { - ib_mad_svc_handle_t h_mad_svc; - CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl ); - /* - If we haven't already queued the asynchronous processing item to - check the send queue, do so now. - */ - h_mad_svc = (ib_mad_svc_handle_t)context; - cl_spinlock_acquire( &h_mad_svc->obj.lock ); - - /* - See if the asynchronous processing item is in use. If it is already - in use, it means that we're about to check the send queue anyway, so - just ignore the timer. Also, don't bother scheduling if the object - state is not CL_INITIALIZED; we may be destroying the MAD service. - */ - if( !h_mad_svc->send_async_item.pfn_callback && - ( h_mad_svc->obj.state == CL_INITIALIZED ) ) - { - /* Not in use, reference the service and queue the callback. 
*/ - cl_atomic_inc( &h_mad_svc->ref_cnt ); - h_mad_svc->send_async_item.pfn_callback = __send_async_proc_cb; - cl_async_proc_queue( &h_mad_svc->send_async_proc, - &h_mad_svc->send_async_item ); - } - - cl_spinlock_release( &h_mad_svc->obj.lock ); - - CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl ); -} - - - -static void -__send_async_proc_cb( - IN cl_async_proc_item_t *p_send_async_item ) -{ - ib_mad_svc_handle_t h_mad_svc; - - CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl ); - - h_mad_svc = PARENT_STRUCT( p_send_async_item, al_mad_svc_t, - send_async_item ); - - cl_spinlock_acquire( &h_mad_svc->obj.lock ); - - /* - * Don't bother processing if the object state is not - * CL_INITIALIZED; we may be destroying the MAD service. - */ - if( h_mad_svc->obj.state != CL_INITIALIZED ) - { - cl_spinlock_release( &h_mad_svc->obj.lock ); - cl_atomic_dec( &h_mad_svc->ref_cnt ); - return; - } - - /* The send_async_item is available for use again. */ - h_mad_svc->send_async_item.pfn_callback = NULL; - - cl_spinlock_release( &h_mad_svc->obj.lock ); - - __check_send_queue( h_mad_svc ); + __check_send_queue( (ib_mad_svc_handle_t)context ); - /* Release the reference held during async processing. 
*/ - cl_atomic_dec( &h_mad_svc->ref_cnt ); CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl ); } diff --git a/trunk/core/al/al_mad.h b/trunk/core/al/al_mad.h index dc35440a..380618cb 100644 --- a/trunk/core/al/al_mad.h +++ b/trunk/core/al/al_mad.h @@ -147,8 +147,6 @@ typedef struct _al_mad_svc ib_pfn_mad_comp_cb_t pfn_user_send_cb; ib_pfn_mad_comp_cb_t pfn_user_recv_cb; - cl_async_proc_t send_async_proc; - cl_async_proc_item_t send_async_item; cl_qlist_t send_list; cl_timer_t send_timer; @@ -221,5 +219,11 @@ get_mad_hdr_from_wr( IN al_mad_wr_t* const p_mad_wr ); +ib_api_status_t +ib_delay_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element, + IN const uint32_t delay_ms ); + #endif /* __IB_AL_MAD_H__ */ diff --git a/trunk/core/al/al_mad_pool.h b/trunk/core/al/al_mad_pool.h index 3d6bf378..6ebd9c5d 100644 --- a/trunk/core/al/al_mad_pool.h +++ b/trunk/core/al/al_mad_pool.h @@ -221,6 +221,9 @@ typedef struct _al_mad_send /* Absolute time that the request should be retried. */ uint64_t retry_time; + /* Delay, in milliseconds, to add before the next retry. */ + uint32_t delay; + /* Number of times that the request can be retried. 
*/ uint32_t retry_cnt; boolean_t canceled; /* indicates if send was canceled */ diff --git a/trunk/core/al/al_mcast.c b/trunk/core/al/al_mcast.c index 0111a237..14cb159b 100644 --- a/trunk/core/al/al_mcast.c +++ b/trunk/core/al/al_mcast.c @@ -264,7 +264,7 @@ __destroying_mcast( ref_al_obj( &h_mcast->obj ); status = al_send_sa_req( - &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data ); + &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data, 0 ); if( status != IB_SUCCESS ) deref_al_obj( &h_mcast->obj ); @@ -395,7 +395,7 @@ send_join( p_mcast->state = SA_REG_STARTING; status = al_send_sa_req( &p_mcast->sa_reg_req, p_mcast->port_guid, - p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data ); + p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data, 0 ); CL_EXIT( AL_DBG_MCAST, g_al_dbg_lvl ); return status; diff --git a/trunk/core/al/al_proxy.h b/trunk/core/al/al_proxy.h index 80e0afdf..fccf903b 100644 --- a/trunk/core/al/al_proxy.h +++ b/trunk/core/al/al_proxy.h @@ -230,6 +230,10 @@ cl_status_t cm_ioctl( IN cl_ioctl_handle_t h_ioctl, OUT size_t *p_ret_bytes ); +cl_status_t cep_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + cl_status_t ioc_ioctl( IN cl_ioctl_handle_t h_ioctl, OUT size_t *p_ret_bytes ); diff --git a/trunk/core/al/al_qp.c b/trunk/core/al/al_qp.c index fd0c5631..94659483 100644 --- a/trunk/core/al/al_qp.c +++ b/trunk/core/al/al_qp.c @@ -37,7 +37,7 @@ #include "al.h" #include "al_av.h" #include "al_ca.h" -#include "al_cm_shared.h" +#include "al_cm_cep.h" #include "al_cq.h" #include "al_debug.h" #include "al_mad.h" @@ -583,37 +583,6 @@ al_bad_join_mcast( } -/* -static ib_api_status_t -al_bad_leave_mcast( - IN const ib_mcast_handle_t h_mcast ) -{ - UNUSED_PARAM( h_mcast ); - return IB_INVALID_PARAMETER; -} -*/ - - -static ib_api_status_t -al_bad_cm_call( - IN OUT al_conn_t* const p_conn ) -{ - UNUSED_PARAM( p_conn ); - return IB_INVALID_PARAMETER; -} - - -static ib_api_status_t 
-al_bad_cm_pre_rep( - IN OUT al_conn_t* const p_conn, - IN OUT const ib_cm_rep_t* p_cm_rep ) -{ - UNUSED_PARAM( p_conn ); - UNUSED_PARAM( p_cm_rep ); - return IB_INVALID_PARAMETER; -} - - ib_api_status_t init_base_qp( IN ib_qp_t* const p_qp, @@ -733,11 +702,6 @@ init_raw_qp( cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel ); cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel ); - /* - * Get the QP attributes. This works around a bug with create QP calls - * above not reporting the correct attributes. - */ -// ib_query_qp( h_qp, &qp_attr ); h_qp->num = qp_attr.num; return IB_SUCCESS; @@ -758,12 +722,10 @@ init_conn_qp( /* Initialize the inherited QP first. */ status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID, p_qp_create, p_umv_buf ); - if( status != IB_SUCCESS ) - { - return status; - } - return IB_SUCCESS; + p_conn_qp->cid = AL_INVALID_CID; + + return status; } @@ -1051,7 +1013,7 @@ destroying_qp( ib_qp_handle_t h_qp; al_mad_qp_t *p_mad_qp; al_qp_alias_t *p_qp_alias; - al_conn_qp_t *p_conn_qp; + net32_t cid; CL_ASSERT( p_obj ); h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj ); @@ -1119,12 +1081,19 @@ destroying_qp( case IB_QPT_RELIABLE_CONN: case IB_QPT_UNRELIABLE_CONN: - p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp); + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + ref_al_obj( &h_qp->obj ); + if( al_destroy_cep( + h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + } - /* Disconnect the QP. */ - cm_conn_destroy( p_conn_qp ); /* Fall through. */ - case IB_QPT_UNRELIABLE_DGRM: default: /* Multicast membership gets cleaned up by object hierarchy. 
*/ @@ -2039,6 +2008,26 @@ qp_async_event_cb( CL_ASSERT( p_event_rec ); h_qp = (ib_qp_handle_t)p_event_rec->context; +#if defined(CL_KERNEL) + switch( p_event_rec->code ) + { + case IB_AE_QP_COMM: + al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid ); + break; + + case IB_AE_QP_APM: + al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid ); + break; + + case IB_AE_QP_APM_ERROR: + //***TODO: Figure out how to handle these errors. + break; + + default: + break; + } +#endif + p_event_rec->context = (void*)h_qp->obj.context; p_event_rec->handle.h_qp = h_qp; @@ -2087,13 +2076,3 @@ ib_bind_mw( CL_EXIT( AL_DBG_MW, g_al_dbg_lvl ); return status; } - - -ib_al_handle_t -qp_get_al( - IN const ib_qp_handle_t h_qp ) -{ - /* AL the is great-grandparent of the QP. */ - return (ib_al_handle_t) - h_qp->obj.p_parent_obj->p_parent_obj->p_parent_obj; -} diff --git a/trunk/core/al/al_qp.h b/trunk/core/al/al_qp.h index 960cd343..a8c3c581 100644 --- a/trunk/core/al/al_qp.h +++ b/trunk/core/al/al_qp.h @@ -161,6 +161,20 @@ typedef struct _al_conn_qp ib_cm_handle_t p_conn; + atomic32_t cid; + + /* Callback table. */ + ib_pfn_cm_req_cb_t pfn_cm_req_cb; + ib_pfn_cm_rep_cb_t pfn_cm_rep_cb; + ib_pfn_cm_mra_cb_t pfn_cm_mra_cb; + ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb; + ib_pfn_cm_lap_cb_t pfn_cm_lap_cb; + ib_pfn_cm_apr_cb_t pfn_cm_apr_cb; + ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb; + ib_pfn_cm_drep_cb_t pfn_cm_drep_cb; + ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; /* If RTU times out */ + + } al_conn_qp_t; @@ -284,9 +298,12 @@ qp_async_event_cb( /* Return the AL instance associated with this QP. 
*/ -ib_al_handle_t +static inline ib_al_handle_t qp_get_al( - IN const ib_qp_handle_t h_qp ); + IN const ib_qp_handle_t h_qp ) +{ + return h_qp->obj.h_al; +} #endif /* __AL_QP_H__ */ diff --git a/trunk/core/al/al_query.c b/trunk/core/al/al_query.c index 00c6c6f4..f6fd620d 100644 --- a/trunk/core/al/al_query.c +++ b/trunk/core/al/al_query.c @@ -48,18 +48,14 @@ static ib_api_status_t query_sa( IN al_query_t *p_query, - IN const ib_query_req_t* const p_query_req ); + IN const ib_query_req_t* const p_query_req, + IN const ib_al_flags_t flags ); void query_req_cb( IN al_sa_req_t *p_sa_req, IN ib_mad_element_t *p_mad_response ); -static void -__free_query( - IN OUT al_query_t *p_query ); - - ib_api_status_t ib_query( @@ -69,8 +65,6 @@ ib_query( { al_query_t *p_query; ib_api_status_t status; - cl_status_t cl_status; - boolean_t sync; CL_ENTER( AL_DBG_QUERY, g_al_dbg_lvl ); @@ -98,23 +92,6 @@ ib_query( return IB_INSUFFICIENT_MEMORY; } - /* Check for synchronous operation. */ - p_query->flags = p_query_req->flags; - cl_event_construct( &p_query->event ); - sync = ( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC ); - if( sync ) - { - cl_status = cl_event_init( &p_query->event, TRUE ); - if( cl_status != CL_SUCCESS ) - { - status = ib_convert_cl_status( cl_status ); - CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, - ("cl_init_event failed: %s\n", ib_get_err_str(status) ) ); - __free_query( p_query ); - return status; - } - } - /* Copy the query context information. */ p_query->sa_req.pfn_sa_req_cb = query_req_cb; p_query->sa_req.user_context = p_query_req->query_context; @@ -124,38 +101,26 @@ ib_query( /* Track the query with the AL instance. */ al_insert_query( h_al, p_query ); + /* + * Set the query handle now so that users that do sync queries + * can also cancel the queries. + */ + if( ph_query ) + *ph_query = p_query; + /* Issue the MAD to the SA. 
*/ - status = query_sa( p_query, (ib_query_req_t*)p_query_req ); - if( status == IB_SUCCESS ) - { - /* - * Set the query handle now so that users that do sync queries - * can also cancel the queries. - */ - if( ph_query ) - *ph_query = p_query; - /* If synchronous, wait for the completion. */ - if( sync ) - { - do - { - cl_status = cl_event_wait_on( - &p_query->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE ); - } while( cl_status == CL_NOT_DONE ); - CL_ASSERT( cl_status == CL_SUCCESS ); - } - } - else if( status != IB_INVALID_GUID ) + status = query_sa( p_query, p_query_req, p_query_req->flags ); + if( status != IB_SUCCESS && status != IB_INVALID_GUID ) { CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, ("query_sa failed: %s\n", ib_get_err_str(status) ) ); } /* Cleanup from issuing the query if it failed or was synchronous. */ - if( ( status != IB_SUCCESS ) || sync ) + if( status != IB_SUCCESS ) { al_remove_query( p_query ); - __free_query( p_query ); + cl_free( p_query ); } CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl ); @@ -170,7 +135,8 @@ ib_query( static ib_api_status_t query_sa( IN al_query_t *p_query, - IN const ib_query_req_t* const p_query_req ) + IN const ib_query_req_t* const p_query_req, + IN const ib_al_flags_t flags ) { ib_user_query_t sa_req, *p_sa_req; union _query_sa_recs @@ -329,7 +295,7 @@ query_sa( status = al_send_sa_req( &p_query->sa_req, p_query_req->port_guid, p_query_req->timeout_ms, - p_query_req->retry_cnt, p_sa_req ); + p_query_req->retry_cnt, p_sa_req, flags ); CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl ); return status; } @@ -389,29 +355,9 @@ query_req_cb( /* Notify the user of the result. */ p_query->pfn_query_cb( &query_rec ); - /* Check for synchronous operation. */ - if( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC ) - { - cl_event_signal( &p_query->event ); - } - else - { - /* Cleanup from issuing the query. */ - al_remove_query( p_query ); - __free_query( p_query ); - } + /* Cleanup from issuing the query. 
*/ + al_remove_query( p_query ); + cl_free( p_query ); CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl ); } - - - -static void -__free_query( - IN OUT al_query_t *p_query ) -{ - CL_ASSERT( p_query ); - - cl_event_destroy( &p_query->event ); - cl_free( p_query ); -} diff --git a/trunk/core/al/al_query.h b/trunk/core/al/al_query.h index 31b58905..9186e9ee 100644 --- a/trunk/core/al/al_query.h +++ b/trunk/core/al/al_query.h @@ -92,6 +92,7 @@ typedef struct _al_sa_req sa_req_svc_t *p_sa_req_svc; /* For cancellation */ ib_mad_element_t *p_mad_response; ib_mad_element_t *p_mad_request; /* For cancellation */ + KEVENT *p_sync_event; #else /* defined( CL_KERNEL ) */ uint64_t hdl; ual_send_sa_req_ioctl_t ioctl; @@ -108,10 +109,6 @@ typedef struct _al_query { al_sa_req_t sa_req; /* Must be first. */ - /* Used to perform synchronous requests. */ - ib_al_flags_t flags; - cl_event_t event; - ib_al_handle_t h_al; ib_pfn_query_cb_t pfn_query_cb; ib_query_type_t query_type; @@ -130,7 +127,8 @@ al_send_sa_req( IN const net64_t port_guid, IN const uint32_t timeout_ms, IN const uint32_t retry_cnt, - IN const ib_user_query_t* const p_sa_req_data ); + IN const ib_user_query_t* const p_sa_req_data, + IN const ib_al_flags_t flags ); #if defined( CL_KERNEL ) static __inline void diff --git a/trunk/core/al/al_reg_svc.c b/trunk/core/al/al_reg_svc.c index 2f16f106..2dd09123 100644 --- a/trunk/core/al/al_reg_svc.c +++ b/trunk/core/al/al_reg_svc.c @@ -47,12 +47,16 @@ __dereg_svc_cb( { ib_reg_svc_handle_t h_reg_svc; - h_reg_svc = PARENT_STRUCT ( p_sa_req, al_reg_svc_t, sa_req ); + /* + * Note that we come into this callback with a reference + * on the registration object. 
+ */ + h_reg_svc = PARENT_STRUCT( p_sa_req, al_reg_svc_t, sa_req ); if( p_mad_response ) ib_put_mad( p_mad_response ); - deref_al_obj( &h_reg_svc->obj ); + h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL ); } @@ -76,7 +80,7 @@ __sa_dereg_svc( sa_mad_data.comp_mask = ~CL_CONST64(0); if( al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid, - 500, 0, &sa_mad_data ) != IB_SUCCESS ) + 500, 0, &sa_mad_data, 0 ) != IB_SUCCESS ) { /* Cleanup from the registration. */ deref_al_obj( &h_reg_svc->obj ); @@ -153,12 +157,15 @@ reg_svc_req_cb( h_reg_svc->pfn_reg_svc_cb( ®_svc_rec ); - /* Check for synchronous operation. */ - if( (h_reg_svc->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC ) - cl_event_signal( &h_reg_svc->event ); - - /* Release the reference taken when issuing the request. */ - deref_al_obj( &h_reg_svc->obj ); + if( p_sa_req->status != IB_SUCCESS ) + { + h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL ); + } + else + { + /* Release the reference taken when issuing the request. */ + deref_al_obj( &h_reg_svc->obj ); + } } @@ -211,7 +218,6 @@ __free_sa_reg( h_sa_reg = PARENT_STRUCT( p_obj, al_reg_svc_t, obj ); destroy_al_obj( p_obj ); - cl_event_destroy( &h_sa_reg->event ); cl_free( h_sa_reg ); AL_EXIT( AL_DBG_SA_REQ ); @@ -224,7 +230,6 @@ sa_reg_svc( IN const ib_reg_svc_req_t* const p_reg_svc_req ) { ib_user_query_t sa_mad_data; - ib_api_status_t status; /* Set the request information. 
*/ h_reg_svc->sa_req.pfn_sa_req_cb = reg_svc_req_cb; @@ -238,9 +243,9 @@ sa_reg_svc( sa_mad_data.comp_mask = p_reg_svc_req->svc_data_mask; sa_mad_data.p_attr = &h_reg_svc->svc_rec; - status = al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid, - p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data ); - return status; + return al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid, + p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data, + p_reg_svc_req->flags ); } @@ -252,7 +257,6 @@ ib_reg_svc( { ib_reg_svc_handle_t h_sa_reg = NULL; ib_api_status_t status; - cl_status_t cl_status; AL_ENTER( AL_DBG_SA_REQ ); @@ -275,8 +279,6 @@ ib_reg_svc( return IB_INSUFFICIENT_MEMORY; } - h_sa_reg->flags = p_reg_svc_req->flags; - cl_event_construct( &h_sa_reg->event ); construct_al_obj( &h_sa_reg->obj, AL_OBJ_TYPE_H_SA_REG ); status = init_al_obj( &h_sa_reg->obj, p_reg_svc_req->svc_context, TRUE, @@ -299,20 +301,6 @@ ib_reg_svc( return status; } - /* Check for synchronous operation. */ - if( h_sa_reg->flags & IB_FLAGS_SYNC ) - { - cl_status = cl_event_init( &h_sa_reg->event, TRUE ); - if( cl_status != CL_SUCCESS ) - { - status = ib_convert_cl_status( cl_status ); - AL_TRACE_EXIT( AL_DBG_ERROR, - ("cl_init_event failed: %s\n", ib_get_err_str(status)) ); - h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL ); - return status; - } - } - /* Store the port GUID on which to issue the request. */ h_sa_reg->port_guid = p_reg_svc_req->port_guid; @@ -325,38 +313,18 @@ ib_reg_svc( /* Issue the MAD to the SA. */ status = sa_reg_svc( h_sa_reg, p_reg_svc_req ); - if( status == IB_SUCCESS ) - { - /* If synchronous, wait for the completion. */ - if( h_sa_reg->flags & IB_FLAGS_SYNC ) - { - do - { - cl_status = cl_event_wait_on( - &h_sa_reg->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE ); - } while( cl_status == CL_NOT_DONE ); - CL_ASSERT( cl_status == CL_SUCCESS ); - - /* Cleanup from issuing the request if it failed. 
*/ - if( h_sa_reg->state == SA_REG_ERROR ) - { - status = h_sa_reg->req_status; - /* The callback released the reference from init_al_obj. */ - ref_al_obj( &h_sa_reg->obj ); - } - } - } - else + if( status != IB_SUCCESS ) { AL_TRACE( AL_DBG_ERROR, ("sa_reg_svc failed: %s\n", ib_get_err_str(status) ) ); h_sa_reg->state = SA_REG_ERROR; - } - if( status != IB_SUCCESS ) h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL ); + } else + { *ph_reg_svc = h_sa_reg; + } AL_EXIT( AL_DBG_SA_REQ ); return status; diff --git a/trunk/core/al/al_reg_svc.h b/trunk/core/al/al_reg_svc.h index c2d61c0b..56f6dbc5 100644 --- a/trunk/core/al/al_reg_svc.h +++ b/trunk/core/al/al_reg_svc.h @@ -51,10 +51,6 @@ typedef struct _al_reg_svc /* Additional status information returned in the registration response. */ ib_net16_t resp_status; - /* Used to perform synchronous requests. */ - ib_al_flags_t flags; - cl_event_t event; - al_sa_reg_state_t state; ib_pfn_reg_svc_cb_t pfn_reg_svc_cb; diff --git a/trunk/core/al/kernel/SOURCES b/trunk/core/al/kernel/SOURCES index 778a0118..1d2001f3 100644 --- a/trunk/core/al/kernel/SOURCES +++ b/trunk/core/al/kernel/SOURCES @@ -7,9 +7,7 @@ DLLDEF=al_exports.def SOURCES= ibal.rc \ al_ca_pnp.c \ al_ci_ca.c \ - al_cm.c \ - al_cm_conn.c \ - al_cm_sidr.c \ + al_cm_cep.c \ al_dev.c \ al_driver.c \ al_ioc_pnp.c \ @@ -18,7 +16,7 @@ SOURCES= ibal.rc \ al_mr.c \ al_pnp.c \ al_proxy.c \ - al_proxy_cm.c \ + al_proxy_cep.c \ al_proxy_ioc.c \ al_proxy_subnet.c \ al_proxy_verbs.c \ @@ -28,7 +26,7 @@ SOURCES= ibal.rc \ ..\al_av.c \ ..\al_ca.c \ ..\al_ci_ca_shared.c \ - ..\al_cm_shared.c \ + ..\al_cm_qp.c \ ..\al_common.c \ ..\al_cq.c \ ..\al_dm.c \ diff --git a/trunk/core/al/kernel/al_cm_cep.c b/trunk/core/al/kernel/al_cm_cep.c new file mode 100644 index 00000000..2bcbfbcf --- /dev/null +++ b/trunk/core/al/kernel/al_cm_cep.c @@ -0,0 +1,5956 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include +#include "al_common.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_debug.h" +#include "ib_common.h" +#include "al_mgr.h" +#include "al_ca.h" +#include "al.h" +#include "al_mad.h" +#include "al_qp.h" + + +/* + * The vector object uses a list item at the front of the buffers + * it allocates. Take the list item into account so that allocations + * are for full page sizes. + */ +#define CEP_CID_MIN \ + ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t)) +#define CEP_CID_GROW \ + ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t)) + +/* + * We reserve the upper byte of the connection ID as a revolving counter so + * that connections that are retried by the client change connection ID. 
+ * This counter is never zero, so it is OK to use all CIDs since we will never + * have a full CID (base + counter) that is zero. + * See the IB spec, section 12.9.8.7 for details about REJ retry. + */ +#define CEP_MAX_CID (0x00FFFFFF) +#define CEP_MAX_CID_MASK (0x00FFFFFF) + +#define CEP_MAD_SQ_DEPTH (128) +#define CEP_MAD_RQ_DEPTH (1) /* ignored. */ +#define CEP_MAD_SQ_SGE (1) +#define CEP_MAD_RQ_SGE (1) /* ignored. */ + + +/* Global connection manager object. */ +typedef struct _al_cep_mgr +{ + al_obj_t obj; + + cl_qmap_t port_map; + + KSPIN_LOCK lock; + + /* Bitmap of CEPs, indexed by CID. */ + cl_vector_t cid_vector; + uint32_t free_cid; + + /* List of active listens. */ + cl_rbmap_t listen_map; + + /* Map of CEP by remote CID and CA GUID. */ + cl_rbmap_t conn_id_map; + /* Map of CEP by remote QPN, used for stale connection matching. */ + cl_rbmap_t conn_qp_map; + + NPAGED_LOOKASIDE_LIST cep_pool; + NPAGED_LOOKASIDE_LIST req_pool; + + /* + * Periodically walk the list of connections in the time wait state + * and flush them as appropriate. + */ + cl_timer_t timewait_timer; + cl_qlist_t timewait_list; + + ib_pnp_handle_t h_pnp; + +} al_cep_mgr_t; + + +/* Per-port CM object. */ +typedef struct _cep_port_agent +{ + al_obj_t obj; + + cl_map_item_t item; + + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ib_qp_handle_t h_qp; + ib_pool_key_t pool_key; + ib_mad_svc_handle_t h_mad_svc; + + net64_t port_guid; + uint8_t port_num; + net16_t base_lid; + +} cep_agent_t; + + +/* + * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively. + * This allows shifting 1 << msg_mraed from an MRA to figure out for what + * message the MRA was sent for. 
+ */ +#define CEP_STATE_RCVD 0x10000000 +#define CEP_STATE_SENT 0x20000000 +#define CEP_STATE_MRA 0x01000000 +#define CEP_STATE_REQ 0x00000001 +#define CEP_STATE_REP 0x00000002 +#define CEP_STATE_LAP 0x00000004 +#define CEP_STATE_RTU 0x00000008 +#define CEP_STATE_DREQ 0x00000010 +#define CEP_STATE_DREP 0x00000020 +#define CEP_STATE_DESTROYING 0x00010000 +#define CEP_STATE_USER 0x00020000 + +#define CEP_MSG_MASK 0x000000FF +#define CEP_OP_MASK 0xF0000000 + +#define CEP_STATE_PREP 0x00100000 + +/* States match CM state transition diagrams from spec. */ +typedef enum _cep_state +{ + CEP_STATE_IDLE, + CEP_STATE_LISTEN, + CEP_STATE_ESTABLISHED, + CEP_STATE_TIMEWAIT, + CEP_STATE_SREQ_SENT, + CEP_STATE_SREQ_RCVD, + CEP_STATE_ERROR, + CEP_STATE_DESTROY = CEP_STATE_DESTROYING, + CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP, + CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD, + CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP, + CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT, + CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA, + CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA, + CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP, + CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD, + CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT, + CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA, + CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA, + CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD, + CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP, + CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT, + CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA, + CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA, + CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP, + CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT, + CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD, + CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING + +} 
cep_state_t; + + +/* Active side CEP state transitions: +* al_create_cep -> IDLE +* al_cep_pre_req -> PRE_REQ +* al_cep_send_req -> REQ_SENT +* Recv REQ MRA -> REQ_MRA_RCVD +* Recv REP -> REP_RCVD +* al_cep_mra -> REP_MRA_SENT +* al_cep_rtu -> ESTABLISHED +* +* Passive side CEP state transitions: +* al_create_cep -> IDLE +* Recv REQ -> REQ_RCVD +* al_cep_mra* -> REQ_MRA_SENT +* al_cep_pre_rep -> PRE_REP +* al_cep_mra* -> PRE_REP_MRA_SENT +* al_cep_send_rep -> REP_SENT +* Recv RTU -> ESTABLISHED +* +* *al_cep_mra can only be called once - either before or after PRE_REP. +*/ + +typedef struct _al_kcep_av +{ + ib_av_attr_t attr; + net64_t port_guid; + uint16_t pkey_index; + +} kcep_av_t; + + +typedef struct _al_kcep +{ + ib_cep_t cep; + + struct _cep_cid *p_cid; + + net64_t sid; + + /* Port guid for filtering incoming requests. */ + net64_t port_guid; + + uint8_t* __ptr64 p_cmp_buf; + uint8_t cmp_offset; + uint8_t cmp_len; + + boolean_t p2p; + + /* Used to store connection structure with owning AL instance. */ + cl_list_item_t al_item; + + /* Flag to indicate whether a user is processing events. */ + boolean_t signalled; + + /* Destroy callback. */ + ib_pfn_destroy_cb_t pfn_destroy_cb; + + ib_mad_element_t *p_mad_head; + ib_mad_element_t *p_mad_tail; + al_pfn_cep_cb_t pfn_cb; + + IRP *p_irp; + + /* MAP item for finding listen CEPs. */ + cl_rbmap_item_t listen_item; + + /* Map item for finding CEPs based on remote comm ID & CA GUID. */ + cl_rbmap_item_t rem_id_item; + + /* Map item for finding CEPs based on remote QP number. */ + cl_rbmap_item_t rem_qp_item; + + /* Communication ID's for the connection. */ + net32_t local_comm_id; + net32_t remote_comm_id; + + net64_t local_ca_guid; + net64_t remote_ca_guid; + + /* Remote QP, used for stale connection checking. */ + net32_t remote_qpn; + + /* Parameters to format QP modification structure. 
*/ + net32_t sq_psn; + net32_t rq_psn; + uint8_t resp_res; + uint8_t init_depth; + uint8_t rnr_nak_timeout; + + /* + * Local QP number, used for the "additional check" required + * of the DREQ. + */ + net32_t local_qpn; + + /* PKEY to make sure a LAP is on the same partition. */ + net16_t pkey; + + /* Initiator depth as received in the REQ. */ + uint8_t req_init_depth; + + /* + * Primary and alternate path info, used to create the address vectors for + * sending MADs, to locate the port CM agent to use for outgoing sends, + * and for creating the address vectors for transitioning QPs. + */ + kcep_av_t av[2]; + uint8_t idx_primary; + + /* Temporary AV and CEP port GUID used when processing LAP. */ + kcep_av_t alt_av; + uint8_t alt_2pkt_life; + + /* maxium packet lifetime * 2 of any path used on a connection. */ + uint8_t max_2pkt_life; + /* Given by the REP, used for alternate path setup. */ + uint8_t target_ack_delay; + /* Stored to help calculate the local ACK delay in the LAP. */ + uint8_t local_ack_delay; + + /* Volatile to allow using atomic operations for state checks. */ + cep_state_t state; + + /* + * Flag that indicates whether a connection took the active role during + * establishment. + */ + boolean_t was_active; + + /* + * Handle to the sent MAD, used for cancelling. We store the handle to + * the mad service so that we can properly cancel. This should not be a + * problem since all outstanding sends should be completed before the + * mad service completes its destruction and the handle becomes invalid. + */ + ib_mad_svc_handle_t h_mad_svc; + ib_mad_element_t *p_send_mad; + + /* Number of outstanding MADs. Delays destruction of CEP destruction. */ + atomic32_t ref_cnt; + + /* MAD transaction ID to use when sending MADs. */ + uint64_t tid; + + /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */ + uint8_t max_cm_retries; + /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. 
*/ + uint32_t retry_timeout; + + /* Timer that will be signalled when the CEP exits timewait. */ + KTIMER timewait_timer; + LARGE_INTEGER timewait_time; + cl_list_item_t timewait_item; + + /* + * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls + * allocate and format the MAD, and the send_req, send_rep and send_apr + * calls send it. + */ + ib_mad_element_t *p_mad; + + /* Cache the last MAD sent for retransmission. */ + union _mads + { + ib_mad_t hdr; + mad_cm_mra_t mra; + mad_cm_rtu_t rtu; + mad_cm_drep_t drep; + + } mads; + +} kcep_t; + + +/* Structures stored in the CID vector. */ +typedef struct _cep_cid +{ + /* Owning AL handle. NULL if invalid. */ + ib_al_handle_t h_al; + /* Pointer to CEP, or index of next free entry if h_al is NULL. */ + kcep_t *p_cep; + /* For REJ Retry support */ + uint8_t modifier; + +} cep_cid_t; + + +/* Global instance of the CM agent. */ +al_cep_mgr_t *gp_cep_mgr = NULL; + + +static ib_api_status_t +__format_drep( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT mad_cm_drep_t* const p_drep ); + +static ib_api_status_t +__cep_queue_mad( + IN kcep_t* const p_cep, + IN ib_mad_element_t* p_mad ); + +static inline void +__process_cep( + IN kcep_t* const p_cep ); + +static inline uint32_t +__calc_mad_timeout( + IN const uint8_t pkt_life ); + +static inline void +__calc_timewait( + IN kcep_t* const p_cep ); + +static kcep_t* +__create_cep( void ); + +static int32_t +__cleanup_cep( + IN kcep_t* const p_cep ); + +static void +__destroy_cep( + IN kcep_t* const p_cep ); + +static inline void +__bind_cep( + IN kcep_t* const p_cep, + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context ); + +static inline void +__unbind_cep( + IN kcep_t* const p_cep ); + +static void +__pre_destroy_cep( + IN kcep_t* const p_cep ); + +static kcep_t* +__lookup_by_id( + IN net32_t remote_comm_id, + IN net64_t remote_ca_guid ); + +static kcep_t* +__lookup_listen( + IN net64_t sid, + 
IN net64_t port_guid, + IN void *p_pdata ); + +static inline kcep_t* +__lookup_cep( + IN ib_al_handle_t h_al OPTIONAL, + IN net32_t cid ); + +static inline kcep_t* +__insert_cep( + IN kcep_t* const p_new_cep ); + +static inline void +__remove_cep( + IN kcep_t* const p_cep ); + +static inline void +__insert_timewait( + IN kcep_t* const p_cep ); + +static ib_api_status_t +__cep_send_mad( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ); + +/* Returns the 1-based port index of the CEP agent with the specified GID. */ +static cep_agent_t* +__find_port_cep( + IN const ib_gid_t* const p_gid, + IN const net16_t lid, + IN const net16_t pkey, + OUT uint16_t* const p_pkey_index ); + +static cep_cid_t* +__get_lcid( + OUT net32_t* const p_cid ); + +static void +__process_cep_send_comp( + IN cl_async_proc_item_t *p_item ); + + +/****************************************************************************** +* Per-port CEP agent +******************************************************************************/ + + +static inline void +__format_mad_hdr( + IN ib_mad_t* const p_mad, + IN const kcep_t* const p_cep, + IN net16_t attr_id ) +{ + p_mad->base_ver = 1; + p_mad->mgmt_class = IB_MCLASS_COMM_MGMT; + p_mad->class_ver = IB_MCLASS_CM_VER_2; + p_mad->method = IB_MAD_METHOD_SEND; + p_mad->status = 0; + p_mad->class_spec = 0; + p_mad->trans_id = p_cep->tid; + p_mad->attr_id = attr_id; + p_mad->resv = 0; + p_mad->attr_mod = 0; +} + + +/* Consumes the input MAD. 
 */
static void
__reject_mad(
	IN				cep_agent_t* const			p_port_cep,
	IN				kcep_t* const				p_cep,
	IN				ib_mad_element_t* const		p_mad,
	IN		const	ib_rej_status_t				reason )
{
	mad_cm_rej_t		*p_rej;

	AL_ENTER( AL_DBG_CM );

	/*
	 * Overwrite the supplied MAD element in place with a formatted REJ
	 * and send it on the given port agent.  The element is consumed:
	 * __cep_send_mad takes ownership regardless of send outcome.
	 */
	p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;

	__format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID );

	p_rej->local_comm_id = p_cep->local_comm_id;
	p_rej->remote_comm_id = p_cep->remote_comm_id;
	p_rej->reason = reason;

	/*
	 * The msg_rejected field tells the remote CM which message is being
	 * rejected: 0 = REQ, 1 = REP, 2 = other (per the IB CM REJ layout).
	 * Select it from the CEP's current state.
	 */
	switch( p_cep->state )
	{
	case CEP_STATE_REQ_RCVD:
	case CEP_STATE_REQ_MRA_SENT:
	case CEP_STATE_PRE_REP:
	case CEP_STATE_PRE_REP_MRA_SENT:
		/* We hold a received REQ - reject that REQ. */
		conn_rej_set_msg_rejected( 0, p_rej );
		break;

	case CEP_STATE_REP_RCVD:
	case CEP_STATE_REP_MRA_SENT:
		/* We hold a received REP - reject that REP. */
		conn_rej_set_msg_rejected( 1, p_rej );
		break;

	default:
		/* Any other state only rejects on timeout ("other" message). */
		CL_ASSERT( reason == IB_REJ_TIMEOUT );
		conn_rej_set_msg_rejected( 2, p_rej );
		break;
	}

	conn_rej_clr_rsvd_fields( p_rej );
	__cep_send_mad( p_port_cep, p_mad );

	AL_EXIT( AL_DBG_CM );
}


/*
 * Send a REJ with reason IB_REJ_TIMEOUT in response to a received REQ or
 * REP MAD.  Allocates a fresh MAD element and clones the input element's
 * addressing information so the reject returns to the sender; the input
 * element is NOT consumed (note the const qualifier).
 */
static void
__reject_timeout(
	IN				cep_agent_t* const			p_port_cep,
	IN				kcep_t* const				p_cep,
	IN		const	ib_mad_element_t* const		p_mad )
{
	ib_api_status_t		status;
	ib_mad_element_t	*p_rej_mad;
	ib_mad_t			*p_mad_buf;
	ib_grh_t			*p_grh;

	AL_ENTER( AL_DBG_CM );

	status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad );
	if( status != IB_SUCCESS )
	{
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("ib_get_mad returned %s\n", ib_get_err_str( status )) );
		return;
	}

	/* Save the buffer pointers from the new element. */
	p_mad_buf = p_rej_mad->p_mad_buf;
	p_grh = p_rej_mad->p_grh;

	/*
	 * Copy the input MAD element to the reject - this gives us
	 * all appropriate addressing information.
	 */
	cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) );
	cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) );

	/* Restore the buffer pointers now that the copy is complete. 
*/ + p_rej_mad->p_mad_buf = p_mad_buf; + p_rej_mad->p_grh = p_grh; + + status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Copy the local CA GUID into the ARI. */ + switch( p_mad->p_mad_buf->attr_id ) + { + case CM_REQ_ATTR_ID: + status = conn_rej_set_ari( + (uint8_t*)&p_cep->local_ca_guid, + sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT ); + break; + + case CM_REP_ATTR_ID: + status = conn_rej_set_ari( + (uint8_t*)&p_cep->local_ca_guid, + sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT ); + break; + + default: + CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID ); + ib_put_mad( p_rej_mad ); + return; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__reject_req( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad, + IN const ib_rej_status_t reason ) +{ + mad_cm_req_t *p_req; + mad_cm_rej_t *p_rej; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_mad ); + CL_ASSERT( reason != 0 ); + + p_req = (mad_cm_req_t*)p_mad->p_mad_buf; + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + /* + * Format the reject information, overwriting the REQ data and send + * the response. 
+ */ + p_rej->hdr.attr_id = CM_REJ_ATTR_ID; + p_rej->remote_comm_id = p_req->local_comm_id; + p_rej->local_comm_id = 0; + conn_rej_set_msg_rejected( 0, p_rej ); + p_rej->reason = reason; + conn_rej_set_ari( NULL, 0, p_rej ); + conn_rej_set_pdata( NULL, 0, p_rej ); + conn_rej_clr_rsvd_fields( p_rej ); + + p_mad->retry_cnt = 0; + p_mad->send_opt = 0; + p_mad->timeout_ms = 0; + p_mad->resp_expected = FALSE; + + __cep_send_mad( p_port_cep, p_mad ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_av( + IN kcep_t* const p_cep, + IN const mad_cm_req_t* const p_req, + IN const uint8_t idx ) +{ + cep_agent_t *p_port_cep; + const req_path_info_t *p_path; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_req ); + + cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) ); + + p_path = &((&p_req->primary_path)[idx]); + + p_port_cep = __find_port_cep( &p_path->remote_gid, + p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index ); + if( !p_port_cep ) + { + if( !idx ) + p_cep->local_ca_guid = 0; + AL_EXIT( AL_DBG_CM ); + return; + } + + if( !idx ) + p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid; + + /* Check that CA GUIDs match if formatting the alternate path. */ + if( idx && + p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + + /* + * Pkey indeces must match if formating the alternat path - the QP + * modify structure only allows for a single PKEY index to be specified. 
+ */ + if( idx && + p_cep->av[0].pkey_index != p_cep->av[1].pkey_index ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + + p_cep->av[idx].port_guid = p_port_cep->port_guid; + p_cep->av[idx].attr.port_num = p_port_cep->port_num; + + p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path ); + p_cep->av[idx].attr.dlid = p_path->local_lid; + + if( !conn_req_path_get_subn_lcl( p_path ) ) + { + p_cep->av[idx].attr.grh_valid = TRUE; + p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) ); + p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit; + p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid; + p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid; + } + else + { + p_cep->av[idx].attr.grh_valid = FALSE; + } + p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path ); + p_cep->av[idx].attr.path_bits = + (uint8_t)(p_path->remote_lid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. + */ + p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req ); + p_cep->av[idx].attr.conn.local_ack_timeout = + conn_req_path_get_lcl_ack_timeout( p_path ); + p_cep->av[idx].attr.conn.seq_err_retry_cnt = + conn_req_get_retry_cnt( p_req ); + p_cep->av[idx].attr.conn.rnr_retry_cnt = + conn_req_get_rnr_retry_cnt( p_req ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * + Validates the path information provided in the REQ and stores the + * associated CA attributes and port indeces. + * + Transitions a connection object from active to passive in the peer case. + * + Sets the path information in the connection and sets the CA GUID + * in the REQ callback record. 
 */
static void
__save_wire_req(
	IN	OUT			kcep_t* const				p_cep,
	IN	OUT			mad_cm_req_t* const			p_req )
{
	AL_ENTER( AL_DBG_CM );

	/* Receiving a REQ makes this the passive side of the connection. */
	p_cep->state = CEP_STATE_REQ_RCVD;
	p_cep->was_active = FALSE;

	p_cep->sid = p_req->sid;

	/* Store pertinent information in the connection. */
	p_cep->remote_comm_id = p_req->local_comm_id;
	p_cep->remote_ca_guid = p_req->local_ca_guid;

	p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req );
	/* No local QP is bound yet; set when the local side prepares its REP. */
	p_cep->local_qpn = 0;

	p_cep->retry_timeout =
		__calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) );

	/* Store the retry count. */
	p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req );

	/*
	 * Copy the paths from the req_rec into the connection for
	 * future use.  Note that if the primary path is invalid,
	 * the REP will fail.
	 */
	__format_req_av( p_cep, p_req, 0 );

	if( p_req->alternate_path.local_lid )
		__format_req_av( p_cep, p_req, 1 );
	else
		cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );

	p_cep->idx_primary = 0;

	/* Store the maximum packet lifetime, used to calculate timewait. */
	p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path );
	p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,
		conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) );

	/*
	 * Make sure the target ack delay is cleared - the above
	 * "packet life" includes it.
	 */
	p_cep->target_ack_delay = 0;

	/* Store the requested initiator depth. */
	p_cep->req_init_depth = conn_req_get_init_depth( p_req );

	/*
	 * Store the provided responder resources.  These turn into the local
	 * QP's initiator depth.
	 */
	p_cep->init_depth = conn_req_get_resp_res( p_req );

	p_cep->sq_psn = conn_req_get_starting_psn( p_req );

	/* Reuse the sender's transaction ID for all replies on this exchange. */
	p_cep->tid = p_req->hdr.trans_id;
	/* Copy MAD info for CM handoff. */
	/* TODO: Do we need to support CM handoff? */
	//p_cep->mads.req = *p_req;

	AL_EXIT( AL_DBG_CM );
}


/* Must be called with the CEP lock held. 
*/ +static void +__repeat_mad( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_cep ); + CL_ASSERT( p_mad ); + + /* Repeat the last mad sent for the connection. */ + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */ + case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */ + case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */ + case CEP_STATE_ESTABLISHED: /* resend RTU */ + case CEP_STATE_TIMEWAIT: /* resend the DREP */ + cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE ); + p_mad->send_context1 = NULL; + p_mad->send_context2 = NULL; + __cep_send_mad( p_port_cep, p_mad ); + break; + + default: + /* Return the MAD to the mad pool */ + ib_put_mad( p_mad ); + break; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_req( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_req_t *p_req; + kcep_t *p_cep, *p_new_cep, *p_stale_cep; + KLOCK_QUEUE_HANDLE hdl; + ib_rej_status_t reason; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_req = (mad_cm_req_t*)p_mad->p_mad_buf; + + AL_TRACE( AL_DBG_CM, + ("REQ: comm_id (x%x) qpn (x%x) received\n", + p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) ); + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + + if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN ) + { + /* Reserved value. Reject. */ + AL_TRACE( AL_DBG_ERROR, ("Invalid transport type received.\n") ); + reason = IB_REJ_INVALID_XPORT; + goto reject; + } + + /* Match against pending connections using remote comm ID and CA GUID. */ + p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid ); + if( p_cep ) + { + /* Already received the REQ. 
*/ + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_SENT: + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + case CEP_STATE_TIMEWAIT: + case CEP_STATE_DESTROY: + /* Send a reject. */ + AL_TRACE( AL_DBG_CM, + ("REQ received for connection in TIME_WAIT state.\n") ); + __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN ); + break; + + default: + /* + * Let regular retries repeat the MAD. If our last message was + * dropped, resending only adds to the congestion. If it wasn't + * dropped, then the remote CM will eventually process it, and + * we'd just be adding traffic. + */ + AL_TRACE( AL_DBG_CM, ("Duplicate REQ received.\n") ); + ib_put_mad( p_mad ); + } + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); + return; + } + + /* + * Allocate a new CEP for the new request. This will + * prevent multiple identical REQs from queueing up for processing. + */ + p_new_cep = __create_cep(); + if( !p_new_cep ) + { + /* Reject the request for insufficient resources. */ + reason = IB_REJ_INSUF_RESOURCES; + AL_TRACE_EXIT( AL_DBG_ERROR, + ("al_create_cep failed\nREJ sent for insufficient resources.\n") ); + goto reject; + } + + __save_wire_req( p_new_cep, p_req ); + + /* + * Match against listens using SID and compare data, also provide the receiving + * MAD service's port GUID so we can properly filter. + */ + p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata ); + if( p_cep ) + { + __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL ); + + /* Add the new CEP to the map so that repeated REQs match up. */ + p_stale_cep = __insert_cep( p_new_cep ); + if( p_stale_cep != p_new_cep ) + { + /* Duplicate - must be a stale connection. */ + /* TODO: Fail the CEP in p_stale_cep */ + reason = IB_REJ_STALE_CONN; + goto unbind; + } + + /* + * Queue the mad - the return value indicates whether we should + * invoke the callback. 
+ */ + status = __cep_queue_mad( p_cep, p_mad ); + switch( status ) + { + case IB_SUCCESS: + case IB_PENDING: + p_mad->send_context1 = p_new_cep; + break; + + default: + reason = IB_REJ_INSUF_RESOURCES; + goto unbind; + } + } + else + { + AL_TRACE( AL_DBG_CM, ("No listens active!\n") ); + + /* Match against peer-to-peer requests using SID and compare data. */ + //p_cep = __lookup_peer(); + //if( p_cep ) + //{ + // p_mad->send_context2 = NULL; + // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list, + // __match_peer, p_req ); + // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) ) + // { + // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item ); + // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad ); + // cl_free( p_async_mad ); + // CL_TRACE_EXIT( AL_DBG_CM, g_al_dbg_lvl, + // ("REQ matched a peer-to-peer request.\n") ); + // return; + // } + // reason = IB_REJ_INVALID_SID; + // goto free; + //} + //else + { + /* No match found. Reject. */ + reason = IB_REJ_INVALID_SID; + AL_TRACE( AL_DBG_CM, ("REQ received but no match found.\n") ); + goto cleanup; + } + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + /* Process any queued MADs for the CEP. */ + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + +unbind: + __unbind_cep( p_new_cep ); + +cleanup: + /* + * Move the CEP in the idle state so that we don't send a reject + * for it when cleaning up. Also clear the RQPN and RCID so that + * we don't try to remove it from our maps (since it isn't inserted). 
+ */ + p_new_cep->state = CEP_STATE_IDLE; + p_new_cep->remote_comm_id = 0; + p_new_cep->remote_qpn = 0; + __cleanup_cep( p_new_cep ); + +reject: + __reject_req( p_port_cep, p_mad, reason ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__save_wire_rep( + IN OUT kcep_t* const p_cep, + IN const mad_cm_rep_t* const p_rep ) +{ + AL_ENTER( AL_DBG_CM ); + + /* The send should have been cancelled during MRA processing. */ + p_cep->state = CEP_STATE_REP_RCVD; + + /* Store pertinent information in the connection. */ + p_cep->remote_comm_id = p_rep->local_comm_id; + p_cep->remote_ca_guid = p_rep->local_ca_guid; + + p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep ); + + /* Store the remote endpoint's target ACK delay. */ + p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep ); + + /* Update the local ACK delay stored in the AV's. */ + p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay ); + p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep ); + + if( p_cep->av[1].port_guid ) + { + p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + p_cep->av[1].attr.conn.local_ack_timeout, + p_cep->target_ack_delay ); + p_cep->av[1].attr.conn.rnr_retry_cnt = + p_cep->av[0].attr.conn.rnr_retry_cnt; + } + + p_cep->init_depth = p_rep->resp_resources; + p_cep->resp_res = p_rep->initiator_depth; + + p_cep->sq_psn = conn_rep_get_starting_psn( p_rep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_mra( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_mra_t *p_mra; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_mra->remote_comm_id ); + if( !p_cep ) + { + 
AL_TRACE( AL_DBG_CM, + ("MRA received that could not be matched.\n") ); + goto err; + } + + if( p_cep->remote_comm_id ) + { + if( p_cep->remote_comm_id != p_mra->local_comm_id ) + { + AL_TRACE( AL_DBG_CM, + ("MRA received that could not be matched.\n") ); + goto err; + } + } + /* + * Note that we don't update the CEP's remote comm ID - it messes up REP + * processing since a non-zero RCID implies the connection is in the RCID + * map. Adding it here requires checking there and conditionally adding + * it. Ignoring it is a valid thing to do. + */ + + if( !(p_cep->state & CEP_STATE_SENT) || + (1 << conn_mra_get_msg_mraed( p_mra ) != + (p_cep->state & CEP_MSG_MASK)) ) + { + /* Invalid state. */ + AL_TRACE( AL_DBG_CM, ("MRA received in invalid state.\n") ); + goto err; + } + + /* Delay the current send. */ + CL_ASSERT( p_cep->p_send_mad ); + ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad, + __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) + + __calc_mad_timeout( p_cep->max_2pkt_life - 1 ) ); + + /* We only invoke a single callback for MRA. */ + if( p_cep->state & CEP_STATE_MRA ) + { + /* Invalid state. */ + AL_TRACE( AL_DBG_CM, ("Already received MRA.\n") ); + goto err; + } + + p_cep->state |= CEP_STATE_MRA; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + +err: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_rej( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rej_t *p_rej; + kcep_t *p_cep = NULL; + KLOCK_QUEUE_HANDLE hdl; + net64_t ca_guid; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + /* Either one of the communication IDs must be set. 
*/ + if( !p_rej->remote_comm_id && !p_rej->local_comm_id ) + goto err1; + + /* Check the pending list by the remote CA GUID and connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + if( p_rej->remote_comm_id ) + { + p_cep = __lookup_cep( NULL, p_rej->remote_comm_id ); + } + else if( p_rej->reason == IB_REJ_TIMEOUT && + conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) ) + { + cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) ); + p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid ); + } + + if( !p_cep ) + { + goto err2; + } + + if( p_cep->remote_comm_id && + p_cep->remote_comm_id != p_rej->local_comm_id ) + { + goto err2; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + /* + * Ignore rejects with the status set to IB_REJ_INVALID_SID. We will + * continue to retry (up to max_cm_retries) to connect to the remote + * side. This is required to support peer-to-peer connections and + * clients that try to connect before the server comes up. + */ + if( p_rej->reason == IB_REJ_INVALID_SID ) + { + AL_TRACE( AL_DBG_CM, + ("Request rejected (invalid SID) - retrying.\n") ); + goto err2; + } + + /* Fall through */ + case CEP_STATE_REP_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_MRA_RCVD: + /* Cancel any outstanding MAD. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Fall through */ + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + if( p_cep->state & CEP_STATE_PREP ) + { + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + } + /* Abort connection establishment. No transition to timewait. 
*/ + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + if( p_cep->state & CEP_STATE_PREP ) + { + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + } + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + /* Ignore the REJ. */ + AL_TRACE( AL_DBG_CM, ("REJ received in invalid state.\n") ); + goto err2; + } + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + +err2: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); +err1: + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_rep( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rep_t *p_rep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_state_t old_state; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf; + + AL_TRACE( AL_DBG_CM, + ("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) ); + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_rep->remote_comm_id ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_TRACE_EXIT( AL_DBG_CM, + ("REP received that could not be matched.\n") ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REQ_SENT: + old_state = p_cep->state; + /* Save pertinent information and change state. */ + __save_wire_rep( p_cep, p_rep ); + + if( __insert_cep( p_cep ) != p_cep ) + { + /* Roll back the state change. 
*/ + p_cep->state = old_state; + __reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN ); + /* TODO: Handle stale connection. */ + break; + } + + /* + * Cancel any outstanding send. Note that we do this only after + * inserting the CEP - if we failed, then we the send will timeout + * and we'll finish our way through the state machine. + */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_REP_MRA_SENT: + /* Repeate the MRA or RTU. */ + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + ib_put_mad( p_mad ); + AL_TRACE( AL_DBG_CM, ("REP received in invalid state.\n") ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_rtu( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rtu_t *p_rtu; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf; + + AL_TRACE( AL_DBG_CM, + ("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) ); + + /* Find the connection by local connection ID. 
*/ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id ) + { + AL_TRACE( AL_DBG_CM, ("RTU received that could not be matched.\n") ); + goto done; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* Cancel any outstanding send. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + p_cep->state = CEP_STATE_ESTABLISHED; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + /* Update timewait time. */ + __calc_timewait( p_cep ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + default: + AL_TRACE( AL_DBG_CM, ("RTU received in invalid state.\n") ); + break; + } + +done: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_dreq( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_dreq_t *p_dreq; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf; + + AL_TRACE( AL_DBG_CM, + ("DREQ: comm_id (x%x) qpn (x%x) received\n", + p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) ); + + /* Find the connection by connection IDs. 
*/ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id ); + if( !p_cep || + p_cep->remote_comm_id != p_dreq->local_comm_id || + p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) ) + { + AL_TRACE( AL_DBG_CM, ("DREQ received that could not be matched.\n") ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_DREQ_SENT: + /* Cancel the outstanding MAD. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Fall through and process as DREQ received case. */ + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + p_cep->state = CEP_STATE_DREQ_RCVD; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + /* Store the TID for use in the reply DREP. */ + p_cep->tid = p_dreq->hdr.trans_id; + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_TIMEWAIT: + case CEP_STATE_DESTROY: + /* Repeat the DREP. */ + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + AL_TRACE( AL_DBG_CM, ("DREQ received in invalid state.\n") ); + case CEP_STATE_DREQ_RCVD: + ib_put_mad( p_mad ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_drep( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_drep_t *p_drep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf; + + /* Find the connection by local connection ID. 
*/ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_drep->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_drep->local_comm_id ) + { + AL_TRACE( AL_DBG_CM, ("DREP received that could not be matched.\n") ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + if( p_cep->state != CEP_STATE_DREQ_SENT && + p_cep->state != CEP_STATE_DREQ_DESTROY ) + { + AL_TRACE( AL_DBG_CM, ("DREP received in invalid state.\n") ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + /* Cancel the DREQ. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + if( p_cep->state == CEP_STATE_DREQ_SENT ) + { + p_cep->state = CEP_STATE_TIMEWAIT; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + } + else + { + /* State is DREQ_DESTROY - move to DESTROY to allow cleanup. 
*/ + CL_ASSERT( p_cep->state == CEP_STATE_DREQ_DESTROY ); + p_cep->state = CEP_STATE_DESTROY; + + ib_put_mad( p_mad ); + status = IB_INVALID_STATE; + } + + __insert_timewait( p_cep ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static boolean_t +__format_lap_av( + IN kcep_t* const p_cep, + IN const lap_path_info_t* const p_path ) +{ + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_path ); + + cl_memclr( &p_cep->alt_av, sizeof(kcep_av_t) ); + + p_port_cep = __find_port_cep( &p_path->remote_gid, p_path->remote_lid, + p_cep->pkey, &p_cep->alt_av.pkey_index ); + if( !p_port_cep ) + { + AL_EXIT( AL_DBG_CM ); + return FALSE; + } + + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_EXIT( AL_DBG_CM ); + return FALSE; + } + + p_cep->alt_av.port_guid = p_port_cep->port_guid; + p_cep->alt_av.attr.port_num = p_port_cep->port_num; + + p_cep->alt_av.attr.sl = conn_lap_path_get_svc_lvl( p_path ); + p_cep->alt_av.attr.dlid = p_path->local_lid; + + if( !conn_lap_path_get_subn_lcl( p_path ) ) + { + p_cep->alt_av.attr.grh_valid = TRUE; + p_cep->alt_av.attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, conn_lap_path_get_tclass( p_path ), + conn_lap_path_get_flow_lbl( p_path ) ); + p_cep->alt_av.attr.grh.hop_limit = p_path->hop_limit; + p_cep->alt_av.attr.grh.dest_gid = p_path->local_gid; + p_cep->alt_av.attr.grh.src_gid = p_path->remote_gid; + } + else + { + p_cep->alt_av.attr.grh_valid = FALSE; + } + p_cep->alt_av.attr.static_rate = conn_lap_path_get_pkt_rate( p_path ); + p_cep->alt_av.attr.path_bits = + (uint8_t)(p_path->remote_lid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. 
For the LAP, we + * first copy the settings from the current primary - MTU and retry + * counts are only specified in the REQ. + */ + p_cep->alt_av.attr.conn = p_cep->av[p_cep->idx_primary].attr.conn; + p_cep->alt_av.attr.conn.local_ack_timeout = + conn_lap_path_get_lcl_ack_timeout( p_path ); + + AL_EXIT( AL_DBG_CM ); + return TRUE; +} + + +static void +__process_lap( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_lap_t *p_lap; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf; + + /* Find the connection by local connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_lap->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_lap->local_comm_id ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_TRACE_EXIT( AL_DBG_CM, ("LAP received that could not be matched.\n") ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* + * These two cases handle the RTU being dropped. Receipt of + * a LAP indicates that the connection is established. + */ + case CEP_STATE_ESTABLISHED: + /* + * We don't check for other "established" states related to + * alternate path management (CEP_STATE_LAP_RCVD, etc) + */ + + /* We only support receiving LAP if we took the passive role. */ + if( p_cep->was_active ) + { + ib_put_mad( p_mad ); + break; + } + + /* Store the transaction ID for use during the LAP exchange. */ + p_cep->tid = p_lap->hdr.trans_id; + + /* + * Copy the path record into the connection for use when + * sending the APR and loading the path. + */ + if( !__format_lap_av( p_cep, &p_lap->alternate_path ) ) + { + /* Trap an invalid path. 
*/ + ib_put_mad( p_mad ); + break; + } + + p_cep->state = CEP_STATE_LAP_RCVD; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_LAP_MRA_SENT: + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + AL_TRACE( AL_DBG_CM, ("LAP received in invalid state.\n") ); + ib_put_mad( p_mad ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__process_apr( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_apr_t *p_apr; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_apr->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_apr->local_comm_id ) + { + AL_TRACE( AL_DBG_CM, ("APR received that could not be matched.\n") ); + goto done; + } + + switch( p_cep->state ) + { + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + /* Cancel sending the LAP. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Copy the temporary alternate AV. */ + p_cep->av[(p_cep->idx_primary + 1) & 0x1] = p_cep->alt_av; + + /* Update the maximum packet lifetime. */ + p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life ); + + /* Update the timewait time. 
*/ + __calc_timewait( p_cep ); + + p_cep->state = CEP_STATE_ESTABLISHED; + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + default: + AL_TRACE( AL_DBG_CM, ("APR received in invalid state.\n") ); + break; + } + +done: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__cep_mad_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *context, + IN ib_mad_element_t *p_mad ) +{ + cep_agent_t *p_port_cep; + ib_mad_t *p_hdr; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + UNUSED_PARAM( h_mad_svc ); + p_port_cep = (cep_agent_t*)context; + + CL_ASSERT( p_mad->p_next == NULL ); + + p_hdr = (ib_mad_t*)p_mad->p_mad_buf; + + /* + * TODO: Add filtering in all the handlers for unsupported class version. + * See 12.6.7.2 Rejection Reason, code 31. 
+ */ + + switch( p_hdr->attr_id ) + { + case CM_REQ_ATTR_ID: + __process_req( p_port_cep, p_mad ); + break; + + case CM_MRA_ATTR_ID: + __process_mra( p_mad ); + break; + + case CM_REJ_ATTR_ID: + __process_rej( p_mad ); + break; + + case CM_REP_ATTR_ID: + __process_rep( p_port_cep, p_mad ); + break; + + case CM_RTU_ATTR_ID: + __process_rtu( p_mad ); + break; + + case CM_DREQ_ATTR_ID: + __process_dreq( p_port_cep, p_mad ); + break; + + case CM_DREP_ATTR_ID: + __process_drep( p_mad ); + break; + + case CM_LAP_ATTR_ID: + __process_lap( p_port_cep, p_mad ); + break; + + case CM_APR_ATTR_ID: + __process_apr( p_mad ); + break; + + case CM_SIDR_REQ_ATTR_ID: +// p_async_mad->item.pfn_callback = __process_cm_sidr_req; +// break; +// + case CM_SIDR_REP_ATTR_ID: +// p_async_mad->item.pfn_callback = __process_cm_sidr_rep; +// break; +// + default: + ib_put_mad( p_mad ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Invalid CM MAD attribute ID.\n") ); + return; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static inline cep_agent_t* +__get_cep_agent( + IN kcep_t* const p_cep ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_cep ); + + /* Look up the primary CEP port agent */ + p_item = cl_qmap_get( &gp_cep_mgr->port_map, + p_cep->av[p_cep->idx_primary].port_guid ); + if( p_item == cl_qmap_end( &gp_cep_mgr->port_map ) ) + return NULL; + + return PARENT_STRUCT( p_item, cep_agent_t, item ); +} + + +static inline void +__format_mad_av( + OUT ib_mad_element_t* const p_mad, + IN kcep_av_t* const p_av ) +{ + /* Set the addressing information in the MAD. 
*/ + p_mad->grh_valid = p_av->attr.grh_valid; + if( p_av->attr.grh_valid ) + cl_memcpy( p_mad->p_grh, &p_av->attr.grh, sizeof(ib_grh_t) ); + + p_mad->remote_sl = p_av->attr.sl; + p_mad->remote_lid = p_av->attr.dlid; + p_mad->path_bits = p_av->attr.path_bits; + p_mad->pkey_index = p_av->pkey_index; + p_mad->remote_qp = IB_QP1; + p_mad->send_opt = IB_SEND_OPT_SIGNALED; + p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + /* Let the MAD service manage the AV for us. */ + p_mad->h_av = NULL; +} + + +static ib_api_status_t +__cep_send_mad( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_mad ); + + /* Use the mad's attributes already present */ + p_mad->resp_expected = FALSE; + p_mad->retry_cnt = 0; + p_mad->timeout_ms = 0; + + /* Clear the contexts since the send isn't associated with a CEP. */ + p_mad->context1 = NULL; + p_mad->context2 = NULL; + + status = ib_send_mad( p_port_cep->h_mad_svc, p_mad, NULL ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + AL_TRACE( AL_DBG_ERROR, + ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__cep_send_retry( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_mad ); + CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_LAP_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_DREQ_ATTR_ID ); + + /* + * REQ, REP, and DREQ are retried until either a response is + * received or the operation times out. 
+ */ + p_mad->resp_expected = TRUE; + p_mad->retry_cnt = p_cep->max_cm_retries; + p_mad->timeout_ms = p_cep->retry_timeout; + + CL_ASSERT( !p_cep->p_send_mad ); + + /* Store the mad & mad service handle in the CEP for cancelling. */ + p_cep->h_mad_svc = p_port_cep->h_mad_svc; + p_cep->p_send_mad = p_mad; + + /* reference the connection for which we are sending the MAD. */ + cl_atomic_inc( &p_cep->ref_cnt ); + + /* Set the context. */ + p_mad->context1 = p_cep; + p_mad->context2 = NULL; + + /* Fire in the hole! */ + status = ib_send_mad( p_cep->h_mad_svc, p_mad, NULL ); + if( status != IB_SUCCESS ) + { + /* + * Note that we don't need to check for destruction here since + * we're holding the global lock. + */ + cl_atomic_dec( &p_cep->ref_cnt ); + p_cep->p_send_mad = NULL; + ib_put_mad( p_mad ); + AL_TRACE( AL_DBG_ERROR, + ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__cep_mad_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *context, + IN ib_mad_element_t *p_mad ) +{ + ib_api_status_t status; + cep_agent_t *p_port_cep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + ib_pfn_destroy_cb_t pfn_destroy_cb; + void *cep_context; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( h_mad_svc ); + CL_ASSERT( p_mad->p_next == NULL ); + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_port_cep = (cep_agent_t*)context; + + p_cep = (kcep_t* __ptr64)p_mad->context1; + + /* + * The connection context is not set when performing immediate responses, + * such as repeating MADS. + */ + if( !p_cep ) + { + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + p_mad->context1 = NULL; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + /* Clear the sent MAD pointer so that we don't try cancelling again. 
*/ + if( p_cep->p_send_mad == p_mad ) + p_cep->p_send_mad = NULL; + + switch( p_mad->status ) + { + case IB_WCS_SUCCESS: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + break; + + case IB_WCS_CANCELED: + if( p_cep->state != CEP_STATE_REQ_SENT && + p_cep->state != CEP_STATE_REQ_MRA_RCVD && + p_cep->state != CEP_STATE_REP_SENT && + p_cep->state != CEP_STATE_REP_MRA_RCVD && + p_cep->state != CEP_STATE_LAP_SENT && + p_cep->state != CEP_STATE_LAP_MRA_RCVD && + p_cep->state != CEP_STATE_DREQ_SENT && + p_cep->state != CEP_STATE_SREQ_SENT ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + break; + } + /* Treat as a timeout so we don't stall the state machine. */ + p_mad->status = IB_WCS_TIMEOUT_RETRY_ERR; + + /* Fall through. */ + case IB_WCS_TIMEOUT_RETRY_ERR: + default: + /* Timeout. Reject the connection. */ + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* Send the REJ. */ + __reject_timeout( p_port_cep, p_cep, p_mad ); + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_DREQ_DESTROY: + p_cep->state = CEP_STATE_DESTROY; + __insert_timewait( p_cep ); + /* Fall through. */ + + case CEP_STATE_DESTROY: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + goto done; + + case CEP_STATE_DREQ_SENT: + /* + * Make up a DREP mad so we can respond if we receive + * a DREQ while in timewait. 
+ */ + __format_mad_hdr( &p_cep->mads.drep.hdr, p_cep, CM_DREP_ATTR_ID ); + __format_drep( p_cep, NULL, 0, &p_cep->mads.drep ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + + default: + break; + } + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + break; + } + +done: + pfn_destroy_cb = p_cep->pfn_destroy_cb; + cep_context = p_cep->cep.context; + + if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb ) + pfn_destroy_cb( cep_context ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__cep_qp_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); + + /* + * Most of the QP events are trapped by the real owner of the QP. + * For real events, the CM may not be able to do much anyways! + */ +} + + +static ib_api_status_t +__init_data_svc( + IN cep_agent_t* const p_port_cep, + IN const ib_port_attr_t* const p_port_attr ) +{ + ib_api_status_t status; + ib_qp_create_t qp_create; + ib_mad_svc_t mad_svc; + + AL_ENTER( AL_DBG_CM ); + + /* + * Create the PD alias. We use the port CM's al_obj_t as the context + * to allow using deref_al_obj as the destroy callback. + */ + status = ib_alloc_pd( p_port_cep->h_ca, IB_PDT_ALIAS, &p_port_cep->obj, + &p_port_cep->h_pd ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_alloc_pd failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + /* Reference the port object on behalf of the PD. */ + ref_al_obj( &p_port_cep->obj ); + + /* Create the MAD QP. 
*/ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = IB_QPT_QP1_ALIAS; + qp_create.rq_depth = CEP_MAD_RQ_DEPTH; + qp_create.sq_depth = CEP_MAD_SQ_DEPTH; + qp_create.rq_sge = CEP_MAD_RQ_SGE; + qp_create.sq_sge = CEP_MAD_SQ_SGE; + qp_create.sq_signaled = TRUE; + /* + * We use the port CM's al_obj_t as the context to allow using + * deref_al_obj as the destroy callback. + */ + status = ib_get_spl_qp( p_port_cep->h_pd, p_port_attr->port_guid, + &qp_create, &p_port_cep->obj, __cep_qp_event_cb, &p_port_cep->pool_key, + &p_port_cep->h_qp ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_get_spl_qp failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + /* Reference the port object on behalf of the QP. */ + ref_al_obj( &p_port_cep->obj ); + + /* Create the MAD service. */ + cl_memclr( &mad_svc, sizeof(mad_svc) ); + mad_svc.mad_svc_context = p_port_cep; + mad_svc.pfn_mad_recv_cb = __cep_mad_recv_cb; + mad_svc.pfn_mad_send_cb = __cep_mad_send_cb; + mad_svc.support_unsol = TRUE; + mad_svc.mgmt_class = IB_MCLASS_COMM_MGMT; + mad_svc.mgmt_version = IB_MCLASS_CM_VER_2; + mad_svc.method_array[IB_MAD_METHOD_SEND] = TRUE; + status = + ib_reg_mad_svc( p_port_cep->h_qp, &mad_svc, &p_port_cep->h_mad_svc ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_reg_mad_svc failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +/* + * Performs immediate cleanup of resources. 
+ */ +static void +__destroying_port_cep( + IN al_obj_t *p_obj ) +{ + cep_agent_t *p_port_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj ); + + if( p_port_cep->port_guid ) + { + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + cl_qmap_remove_item( &gp_cep_mgr->port_map, &p_port_cep->item ); + KeReleaseInStackQueuedSpinLock( &hdl ); + } + + if( p_port_cep->h_qp ) + { + ib_destroy_qp( p_port_cep->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj ); + p_port_cep->h_qp = NULL; + } + + if( p_port_cep->h_pd ) + { + ib_dealloc_pd( p_port_cep->h_pd, (ib_pfn_destroy_cb_t)deref_al_obj ); + p_port_cep->h_pd = NULL; + } + + AL_EXIT( AL_DBG_CM ); +} + + + +/* + * Release all resources allocated by a port CM agent. Finishes any cleanup + * for a port agent. + */ +static void +__free_port_cep( + IN al_obj_t *p_obj ) +{ + cep_agent_t *p_port_cep; + ib_port_attr_mod_t port_attr_mod; + + AL_ENTER( AL_DBG_CM ); + + p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj ); + + if( p_port_cep->h_ca ) + { + /* Update local port attributes */ + port_attr_mod.cap.cm = FALSE; + ib_modify_ca( p_port_cep->h_ca, p_port_cep->port_num, + IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod ); + + deref_al_obj( &p_port_cep->h_ca->obj ); + } + + destroy_al_obj( &p_port_cep->obj ); + cl_free( p_port_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Create a port agent for a given port. 
+ */ +static ib_api_status_t +__create_port_cep( + IN ib_pnp_port_rec_t *p_pnp_rec ) +{ + cep_agent_t *p_port_cep; + ib_api_status_t status; + ib_port_attr_mod_t port_attr_mod; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + /* calculate size of port_cm struct */ + p_port_cep = (cep_agent_t*)cl_zalloc( sizeof(cep_agent_t) ); + if( !p_port_cep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Failed to cl_zalloc port CM agent.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &p_port_cep->obj, AL_OBJ_TYPE_CM ); + + status = init_al_obj( &p_port_cep->obj, p_port_cep, TRUE, + __destroying_port_cep, NULL, __free_port_cep ); + if( status != IB_SUCCESS ) + { + __free_port_cep( &p_port_cep->obj ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Attach to the global CM object. */ + status = attach_al_obj( &gp_cep_mgr->obj, &p_port_cep->obj ); + if( status != IB_SUCCESS ) + { + p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + p_port_cep->port_guid = p_pnp_rec->p_port_attr->port_guid; + p_port_cep->port_num = p_pnp_rec->p_port_attr->port_num; + p_port_cep->base_lid = p_pnp_rec->p_port_attr->lid; + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + cl_qmap_insert( + &gp_cep_mgr->port_map, p_port_cep->port_guid, &p_port_cep->item ); + KeReleaseInStackQueuedSpinLock( &hdl ); + + /* Get a reference to the CA on which we are loading. 
*/ + p_port_cep->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid ); + if( !p_port_cep->h_ca ) + { + p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, ("acquire_ca failed.\n") ); + return IB_INVALID_GUID; } + + status = __init_data_svc( p_port_cep, p_pnp_rec->p_port_attr ); + if( status != IB_SUCCESS ) + { + p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("__init_data_svc failed with status %s.\n", + ib_get_err_str(status)) ); + return status; + } + + /* Update local port attributes */ + cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) ); + port_attr_mod.cap.cm = TRUE; + status = ib_modify_ca( p_port_cep->h_ca, p_pnp_rec->p_port_attr->port_num, + IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod ); + + /* Update the PNP context to reference this port. */ + p_pnp_rec->pnp_rec.context = p_port_cep; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_port_cep->obj ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +/****************************************************************************** +* Global CEP manager +******************************************************************************/ + +static cep_cid_t* +__get_lcid( + OUT net32_t* const p_cid ) +{ + cl_status_t status; + uint32_t size, cid; + cep_cid_t *p_cep_cid; + + AL_ENTER( AL_DBG_CM ); + + size = (uint32_t)cl_vector_get_size( &gp_cep_mgr->cid_vector ); + cid = gp_cep_mgr->free_cid; + if( gp_cep_mgr->free_cid == size ) + { + /* Grow the vector pool. */ + status = + cl_vector_set_size( &gp_cep_mgr->cid_vector, size + CEP_CID_GROW ); + if( status != CL_SUCCESS ) + { + AL_EXIT( AL_DBG_CM ); + return NULL; + } + /* + * Return the the start of the free list since the + * entry initializer incremented it. + */ + gp_cep_mgr->free_cid = size; + } + + /* Get the next free entry. */ + p_cep_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid ); + + /* Update the next entry index. 
*/ + gp_cep_mgr->free_cid = (uint32_t)(uintn_t)p_cep_cid->p_cep; + + *p_cid = cid; + + AL_EXIT( AL_DBG_CM ); + return p_cep_cid; +} + + +static inline kcep_t* +__lookup_cep( + IN ib_al_handle_t h_al OPTIONAL, + IN net32_t cid ) +{ + size_t idx; + cep_cid_t *p_cid; + + /* Mask off the counter bits so we get the index in our vector. */ + idx = cid & CEP_MAX_CID_MASK; + + /* + * Remove the CEP from the CID vector - no further API calls + * will succeed for it. + */ + if( idx > cl_vector_get_size( &gp_cep_mgr->cid_vector ) ) + return NULL; + + p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, idx ); + if( !p_cid->h_al ) + return NULL; + + /* + * h_al is NULL when processing MADs, so we need to match on + * the actual local communication ID. If h_al is non-NULL, we + * are doing a lookup from a call to our API, and only need to match + * on the index in the vector (without the modifier). + */ + if( h_al ) + { + if( p_cid->h_al != h_al ) + return NULL; + } + else if( p_cid->p_cep->local_comm_id != cid ) + { + return NULL; + } + + return p_cid->p_cep; +} + + +/* + * Lookup a CEP by remote comm ID and CA GUID. + */ +static kcep_t* +__lookup_by_id( + IN net32_t remote_comm_id, + IN net64_t remote_ca_guid ) +{ + cl_rbmap_item_t *p_item; + kcep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + /* Match against pending connections using remote comm ID and CA GUID. 
*/ + p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map ); + while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) ) + { + p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item ); + + if( remote_comm_id < p_cep->remote_comm_id ) + p_item = cl_rbmap_left( p_item ); + else if( remote_comm_id > p_cep->remote_comm_id ) + p_item = cl_rbmap_right( p_item ); + else if( remote_ca_guid < p_cep->remote_ca_guid ) + p_item = cl_rbmap_left( p_item ); + else if( remote_ca_guid > p_cep->remote_ca_guid ) + p_item = cl_rbmap_right( p_item ); + else + return p_cep; + } + + AL_EXIT( AL_DBG_CM ); + return NULL; +} + + +/* + * Lookup a CEP by Service ID and private data. + */ +static kcep_t* +__lookup_listen( + IN net64_t sid, + IN net64_t port_guid, + IN uint8_t *p_pdata ) +{ + cl_rbmap_item_t *p_item; + kcep_t *p_cep; + intn_t cmp; + + AL_ENTER( AL_DBG_CM ); + + /* Match against pending connections using remote comm ID and CA GUID. */ + p_item = cl_rbmap_root( &gp_cep_mgr->listen_map ); + while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) ) + { + p_cep = PARENT_STRUCT( p_item, kcep_t, listen_item ); + + if( sid == p_cep->sid ) + goto port_cmp; + else if( sid < p_cep->sid ) + p_item = cl_rbmap_left( p_item ); + else + p_item = cl_rbmap_right( p_item ); + + continue; + +port_cmp: + if( p_cep->port_guid != IB_ALL_PORTS ) + { + if( port_guid == p_cep->port_guid ) + goto pdata_cmp; + else if( port_guid < p_cep->port_guid ) + p_item = cl_rbmap_left( p_item ); + else + p_item = cl_rbmap_right( p_item ); + + continue; + } + +pdata_cmp: + if( p_cep->p_cmp_buf && p_pdata ) + { + cmp = cl_memcmp( &p_pdata[p_cep->cmp_offset], + p_cep->p_cmp_buf, p_cep->cmp_len ); + + if( !cmp ) + goto match; + else if( cmp < 0 ) + p_item = cl_rbmap_left( p_item ); + else + p_item = cl_rbmap_right( p_item ); + + AL_TRACE( AL_DBG_CM, + ("Svc ID match but compare buffer mismatch.\n") ); + continue; + } + +match: + /* Everything matched. 
*/ + AL_EXIT( AL_DBG_CM ); + return p_cep; + } + + AL_EXIT( AL_DBG_CM ); + return NULL; +} + + +static kcep_t* +__insert_by_id( + IN kcep_t* const p_new_cep ) +{ + kcep_t *p_cep; + cl_rbmap_item_t *p_item, *p_insert_at; + boolean_t left = TRUE; + + AL_ENTER( AL_DBG_CM ); + + p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map ); + p_insert_at = p_item; + while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) ) + { + p_insert_at = p_item; + p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item ); + + if( p_new_cep->remote_comm_id < p_cep->remote_comm_id ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_new_cep->remote_comm_id > p_cep->remote_comm_id ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else + goto done; + } + + cl_rbmap_insert( + &gp_cep_mgr->conn_id_map, p_insert_at, &p_new_cep->rem_id_item, left ); + p_cep = p_new_cep; + +done: + AL_EXIT( AL_DBG_CM ); + return p_cep; +} + + +static kcep_t* +__insert_by_qpn( + IN kcep_t* const p_new_cep ) +{ + kcep_t *p_cep; + cl_rbmap_item_t *p_item, *p_insert_at; + boolean_t left = TRUE; + + AL_ENTER( AL_DBG_CM ); + + p_item = cl_rbmap_root( &gp_cep_mgr->conn_qp_map ); + p_insert_at = p_item; + while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_qp_map ) ) + { + p_insert_at = p_item; + p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item ); + + if( p_new_cep->remote_qpn < p_cep->remote_qpn ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_new_cep->remote_qpn > p_cep->remote_qpn ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else + goto done; 
+ } + + cl_rbmap_insert( + &gp_cep_mgr->conn_qp_map, p_insert_at, &p_new_cep->rem_qp_item, left ); + p_cep = p_new_cep; + +done: + AL_EXIT( AL_DBG_CM ); + return p_cep; +} + + +static inline kcep_t* +__insert_cep( + IN kcep_t* const p_new_cep ) +{ + kcep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + p_cep = __insert_by_qpn( p_new_cep ); + if( p_cep != p_new_cep ) + goto done; + + p_cep = __insert_by_id( p_new_cep ); + if( p_cep != p_new_cep ) + { + cl_rbmap_remove_item( + &gp_cep_mgr->conn_qp_map, &p_new_cep->rem_qp_item ); + p_cep->remote_qpn = 0; + } + +done: + AL_EXIT( AL_DBG_CM ); + return p_cep; +} + + +static inline void +__remove_cep( + IN kcep_t* const p_cep ) +{ + AL_ENTER( AL_DBG_CM ); + + if( p_cep->remote_comm_id ) + { + cl_rbmap_remove_item( + &gp_cep_mgr->conn_id_map, &p_cep->rem_id_item ); + p_cep->remote_comm_id = 0; + } + if( p_cep->remote_qpn ) + { + cl_rbmap_remove_item( + &gp_cep_mgr->conn_qp_map, &p_cep->rem_qp_item ); + p_cep->remote_qpn = 0; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static boolean_t +__is_lid_valid( + IN ib_net16_t lid, + IN ib_net16_t port_lid, + IN uint8_t lmc ) +{ + uint16_t lid1; + uint16_t lid2; + uint16_t path_bits; + + if(lmc) + { + lid1 = CL_NTOH16(lid); + lid2 = CL_NTOH16(port_lid); + path_bits = 0; + + if( lid1 < lid2 ) + return FALSE; + + while( lmc-- ) + path_bits = (uint16_t)( (path_bits << 1) | 1 ); + + lid2 |= path_bits; + + if( lid1 > lid2) + return FALSE; + } + else + { + if (lid != port_lid) + return FALSE; + } + + return TRUE; +} + + +static inline boolean_t +__is_gid_valid( + IN const ib_port_attr_t* const p_port_attr, + IN const ib_gid_t* const p_gid ) +{ + uint16_t idx; + + for( idx = 0; idx < p_port_attr->num_gids; idx++ ) + { + if( !cl_memcmp( + p_gid, &p_port_attr->p_gid_table[idx], sizeof(ib_gid_t) ) ) + { + return TRUE; + } + } + return FALSE; +} + + +static inline boolean_t +__get_pkey_index( + IN const ib_port_attr_t* const p_port_attr, + IN const net16_t pkey, + OUT uint16_t* const p_pkey_index ) +{ + 
uint16_t idx; + + for( idx = 0; idx < p_port_attr->num_pkeys; idx++ ) + { + if( p_port_attr->p_pkey_table[idx] == pkey ) + { + *p_pkey_index = idx; + return TRUE; + } + } + + return FALSE; +} + + +/* Returns the 1-based port index of the CEP agent with the specified GID. */ +static cep_agent_t* +__find_port_cep( + IN const ib_gid_t* const p_gid, + IN const net16_t lid, + IN const net16_t pkey, + OUT uint16_t* const p_pkey_index ) +{ + cep_agent_t *p_port_cep; + cl_list_item_t *p_item; + const ib_port_attr_t *p_port_attr; + + AL_ENTER( AL_DBG_CM ); + + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + for( p_item = cl_qlist_head( &gp_cep_mgr->obj.obj_list ); + p_item != cl_qlist_end( &gp_cep_mgr->obj.obj_list ); + p_item = cl_qlist_next( p_item ) ) + { + p_port_cep = PARENT_STRUCT( p_item, cep_agent_t, obj.pool_item ); + + CL_ASSERT( p_port_cep->port_num ); + + ci_ca_lock_attr( p_port_cep->h_ca->obj.p_ci_ca ); + + p_port_attr = p_port_cep->h_ca->obj.p_ci_ca->p_pnp_attr->p_port_attr; + p_port_attr += (p_port_cep->port_num - 1); + + if( __is_lid_valid( lid, p_port_attr->lid, p_port_attr->lmc ) && + __is_gid_valid( p_port_attr, p_gid ) && + __get_pkey_index( p_port_attr, pkey, p_pkey_index ) ) + { + ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + AL_EXIT( AL_DBG_CM ); + return p_port_cep; + } + + ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca ); + } + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + AL_EXIT( AL_DBG_CM ); + return NULL; +} + + +/* + * PnP callback for port event notifications. + */ +static ib_api_status_t +__cep_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_api_status_t status = IB_SUCCESS; + + AL_ENTER( AL_DBG_CM ); + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_PORT_ADD: + /* Create the port agent. 
*/ + CL_ASSERT( !p_pnp_rec->context ); + status = __create_port_cep( (ib_pnp_port_rec_t*)p_pnp_rec ); + break; + + case IB_PNP_PORT_REMOVE: + CL_ASSERT( p_pnp_rec->context ); + + /* Destroy the port agent. */ + ref_al_obj( &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj ); + ((cep_agent_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy( + &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj, NULL ); + break; + + default: + break; /* Ignore other PNP events. */ + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static inline int64_t +__min_timewait( + IN int64_t current_min, + IN kcep_t* const p_cep ) +{ + /* + * The minimum timer interval is 50 milliseconds. This means + * 500000 100ns increments. Since __process_timewait divides the + * result in half (so that the worst cast timewait interval is 150%) + * we compensate for this here. Note that relative time values are + * expressed as negative. + */ +#define MIN_TIMEWAIT_100NS -1000000 + + /* Still in timewait - try again next time. */ + if( !current_min ) + { + return min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ); + } + else + { + return max( current_min, + min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ) ); + } +} + + +/* + * Timer callback to process CEPs in timewait state. Returns time in ms. 
+ */ +static uint32_t +__process_timewait() +{ + cl_list_item_t *p_item; + kcep_t *p_cep; + LARGE_INTEGER timeout; + int64_t min_timewait = 0; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + timeout.QuadPart = 0; + + p_item = cl_qlist_head( &gp_cep_mgr->timewait_list ); + while( p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ) ) + { + p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item ); + p_item = cl_qlist_next( p_item ); + + CL_ASSERT( p_cep->state == CEP_STATE_DESTROY || + p_cep->state == CEP_STATE_TIMEWAIT ); + + CL_ASSERT( !p_cep->p_mad ); + + if( KeWaitForSingleObject( &p_cep->timewait_timer, Executive, + KernelMode, FALSE, &timeout ) != STATUS_SUCCESS ) + { + /* Still in timewait - try again next time. */ + min_timewait = __min_timewait( min_timewait, p_cep ); + continue; + } + + if( p_cep->ref_cnt ) + { + /* Send outstanding or destruction in progress. */ + min_timewait = __min_timewait( min_timewait, p_cep ); + continue; + } + + /* Remove from the timewait list. */ + cl_qlist_remove_item( &gp_cep_mgr->timewait_list, &p_cep->timewait_item ); + + /* + * Not in timewait. Remove the CEP from the maps - it should + * no longer be matched against. + */ + __remove_cep( p_cep ); + + if( p_cep->state == CEP_STATE_DESTROY ) + { + __destroy_cep( p_cep ); + } + else + { + /* Move the CEP to the IDLE state so that it can be used again. */ + p_cep->state = CEP_STATE_IDLE; + } + } + + AL_EXIT( AL_DBG_CM ); + return (uint32_t)(min_timewait / -20000); +} + + +/* + * Timer callback to process CEPs in timewait state. 
+ */ +static void +__cep_timewait_cb( + IN void *context ) +{ + KLOCK_QUEUE_HANDLE hdl; + uint32_t min_timewait; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( context ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + + min_timewait = __process_timewait(); + + if( cl_qlist_count( &gp_cep_mgr->timewait_list ) ) + { + /* + * Reset the timer for half of the shortest timeout - this results + * in a worst case timeout of 150% of timewait. + */ + cl_timer_trim( &gp_cep_mgr->timewait_timer, min_timewait ); + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Starts immediate cleanup of the CM. Invoked during al_obj destruction. + */ +static void +__destroying_cep_mgr( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + KLOCK_QUEUE_HANDLE hdl; + cl_list_item_t *p_item; + kcep_t *p_cep; + LARGE_INTEGER timeout; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( &gp_cep_mgr->obj == p_obj ); + UNUSED_PARAM( p_obj ); + + /* Deregister from PnP notifications. */ + if( gp_cep_mgr->h_pnp ) + { + status = ib_dereg_pnp( + gp_cep_mgr->h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj ); + if( status != IB_SUCCESS ) + { + CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl, + ("ib_dereg_pnp failed with status %s.\n", + ib_get_err_str(status)) ); + deref_al_obj( &gp_cep_mgr->obj ); + } + } + + /* Cancel all timewait timers. */ + timeout.QuadPart = 0; + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + for( p_item = cl_qlist_head( &gp_cep_mgr->timewait_list ); + p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ); + p_item = cl_qlist_next( p_item ) ) + { + p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item ); + KeSetTimer( &p_cep->timewait_timer, timeout, NULL ); + } + __process_timewait(); + KeReleaseInStackQueuedSpinLock( &hdl ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Frees the global CEP agent. Invoked during al_obj destruction. 
+ */ +static void +__free_cep_mgr( + IN al_obj_t* p_obj ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( &gp_cep_mgr->obj == p_obj ); + /* All listen request should have been cleaned up by this point. */ + CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->listen_map ) ); + /* All connections should have been cancelled/disconnected by now. */ + CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_id_map ) ); + CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_qp_map ) ); + + cl_vector_destroy( &gp_cep_mgr->cid_vector ); + + cl_timer_destroy( &gp_cep_mgr->timewait_timer ); + + /* + * All CM port agents should have been destroyed by now via the + * standard child object destruction provided by the al_obj. + */ + ExDeleteNPagedLookasideList( &gp_cep_mgr->cep_pool ); + destroy_al_obj( p_obj ); + + cl_free( gp_cep_mgr ); + gp_cep_mgr = NULL; + + AL_EXIT( AL_DBG_CM ); +} + + +static cl_status_t +__cid_init( + IN void* const p_element, + IN void* context ) +{ + cep_cid_t *p_cid; + + UNUSED_PARAM( context ); + + p_cid = (cep_cid_t*)p_element; + + p_cid->h_al = NULL; + p_cid->p_cep = (kcep_t*)(uintn_t)++gp_cep_mgr->free_cid; + p_cid->modifier = 0; + + return CL_SUCCESS; +} + + +/* + * Allocates and initialized the global CM agent. + */ +ib_api_status_t +create_cep_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + cl_status_t cl_status; + ib_pnp_req_t pnp_req; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( gp_cep_mgr == NULL ); + + /* Allocate the global CM agent. 
*/ + gp_cep_mgr = (al_cep_mgr_t*)cl_zalloc( sizeof(al_cep_mgr_t) ); + if( !gp_cep_mgr ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Failed allocation of global CM agent.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM ); + ExInitializeNPagedLookasideList( &gp_cep_mgr->cep_pool, NULL, NULL, + 0, sizeof(kcep_t), 'PECK', 0 ); + cl_qmap_init( &gp_cep_mgr->port_map ); + cl_rbmap_init( &gp_cep_mgr->listen_map ); + cl_rbmap_init( &gp_cep_mgr->conn_id_map ); + cl_rbmap_init( &gp_cep_mgr->conn_qp_map ); + cl_qlist_init( &gp_cep_mgr->timewait_list ); + /* Timer initialization can't fail in kernel-mode. */ + cl_timer_init( &gp_cep_mgr->timewait_timer, __cep_timewait_cb, NULL ); + cl_vector_construct( &gp_cep_mgr->cid_vector ); + + status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE, + __destroying_cep_mgr, NULL, __free_cep_mgr ); + if( status != IB_SUCCESS ) + { + __free_cep_mgr( &gp_cep_mgr->obj ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + /* Attach to the parent object. */ + status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + cl_status = cl_vector_init( &gp_cep_mgr->cid_vector, + CEP_CID_MIN, CEP_CID_GROW, sizeof(cep_cid_t), __cid_init, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("cl_vector_init failed with status %s.\n", + CL_STATUS_MSG(cl_status)) ); + return ib_convert_cl_status( cl_status ); + } + + gp_cep_mgr->free_cid = 0; + + /* Register for port PnP notifications. 
*/ + cl_memclr( &pnp_req, sizeof(pnp_req) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &gp_cep_mgr->obj; + pnp_req.pfn_pnp_cb = __cep_pnp_cb; + status = ib_reg_pnp( gh_al, &pnp_req, &gp_cep_mgr->h_pnp ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_reg_pnp failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* + * Leave the reference taken in init_al_obj oustanding since PnP + * deregistration is asynchronous. This replaces a call to ref and + * deref the object. + */ + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + +/****************************************************************************** +* CEP manager API +******************************************************************************/ + +/* Called with the CEP and CEP manager locks held */ +static ib_api_status_t +__cep_queue_mad( + IN kcep_t* const p_cep, + IN ib_mad_element_t* p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( !p_mad->p_next ); + + if( p_cep->state == CEP_STATE_DESTROY ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_STATE; + } + + /* Queue this MAD for processing. */ + if( p_cep->p_mad_head ) + { + CL_ASSERT( p_cep->signalled ); + /* + * If there's already a MAD at the head of the list, we will not + * invoke the callback. Just queue and exit. + */ + CL_ASSERT( p_cep->p_mad_tail ); + p_cep->p_mad_tail->p_next = p_mad; + p_cep->p_mad_tail = p_mad; + AL_EXIT( AL_DBG_CM ); + return IB_PENDING; + } + + p_cep->p_mad_head = p_mad; + p_cep->p_mad_tail = p_mad; + + if( p_cep->signalled ) + { + /* signalled was already non-zero. Don't invoke the callback again. */ + AL_EXIT( AL_DBG_CM ); + return IB_PENDING; + } + + p_cep->signalled = TRUE; + + /* Take a reference since we're about to invoke the callback. 
*/ + cl_atomic_inc( &p_cep->ref_cnt ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +static inline void +__cep_complete_irp( + IN kcep_t* const p_cep, + IN NTSTATUS status, + IN CCHAR increment ) +{ + IRP *p_irp; + + AL_ENTER( AL_DBG_CM ); + + p_irp = InterlockedExchangePointer( &p_cep->p_irp, NULL ); + + if( p_irp ) + { +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + + /* Complete the IRP. */ + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, increment ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +static inline void +__process_cep( + IN kcep_t* const p_cep ) +{ + ib_pfn_destroy_cb_t pfn_destroy_cb; + void *context; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + /* Signal to the user there are callback waiting. */ + if( p_cep->pfn_cb ) + p_cep->pfn_cb( p_cep->p_cid->h_al, &p_cep->cep ); + else + __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT ); + + pfn_destroy_cb = p_cep->pfn_destroy_cb; + context = p_cep->cep.context; + + /* + * Release the reference for the callback and invoke the destroy + * callback if necessary. + */ + if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb ) + pfn_destroy_cb( context ); + + AL_EXIT( AL_DBG_CM ); +} + + +static uint32_t +__calc_mad_timeout( + IN const uint8_t pkt_life ) +{ + /* + * Calculate the retry timeout. + * All timeout values in micro seconds are expressed as 4.096 * 2^x, + * where x is the timeout. The formula to approximates this to + * milliseconds using just shifts and subtraction is: + * timeout_ms = 67 << (x - 14) + * The results are off by 0.162%. + * + * Note that we will never return less than 1 millisecond. We also + * trap exceedingly large values to prevent wrapping. 
+ */ + if( pkt_life > 39 ) + return ~0UL; + if( pkt_life > 14 ) + return 67 << (pkt_life - 14); + else if( pkt_life > 8 ) + return 67 >> (14 - pkt_life); + else + return 1; +} + + +/* CEP manager lock is held when calling this function. */ +static kcep_t* +__create_cep() +{ + kcep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + p_cep = ExAllocateFromNPagedLookasideList( &gp_cep_mgr->cep_pool ); + if( !p_cep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") ); + return NULL; + } + + cl_memclr( p_cep, sizeof(kcep_t) ); + + KeInitializeTimer( &p_cep->timewait_timer ); + + p_cep->state = CEP_STATE_IDLE; + + /* + * Pre-charge the reference count to 1. The code will invoke the + * destroy callback once the ref count reaches to zero. + */ + p_cep->ref_cnt = 1; + p_cep->signalled = FALSE; + + /* Find a free entry in the CID vector. */ + p_cep->p_cid = __get_lcid( &p_cep->cep.cid ); + + if( !p_cep->p_cid ) + { + ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep ); + AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to get CID.\n") ); + return NULL; + } + + p_cep->p_cid->modifier++; + /* + * We don't ever want a modifier of zero for the CID at index zero + * since it would result in a total CID of zero. + */ + if( !p_cep->cep.cid && !p_cep->p_cid->modifier ) + p_cep->p_cid->modifier++; + + p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24); + p_cep->tid = p_cep->local_comm_id; + + p_cep->p_cid->p_cep = p_cep; + + ref_al_obj( &gp_cep_mgr->obj ); + + AL_EXIT( AL_DBG_CM ); + return p_cep; +} + + +static inline void +__bind_cep( + IN kcep_t* const p_cep, + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context ) +{ + CL_ASSERT( p_cep ); + CL_ASSERT( p_cep->p_cid ); + CL_ASSERT( h_al ); + + p_cep->p_cid->h_al = h_al; + p_cep->pfn_cb = pfn_cb; + p_cep->cep.context = context; + + /* Track the CEP in its owning AL instance. 
*/ + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &h_al->obj.lock ); +} + + +static inline void +__unbind_cep( + IN kcep_t* const p_cep ) +{ + CL_ASSERT( p_cep ); + CL_ASSERT( p_cep->p_cid ); + CL_ASSERT( p_cep->p_cid->h_al ); + + /* Track the CEP in its owning AL instance. */ + cl_spinlock_acquire( &p_cep->p_cid->h_al->obj.lock ); + cl_qlist_remove_item( &p_cep->p_cid->h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &p_cep->p_cid->h_al->obj.lock ); + + /* + * Set to the internal AL handle - it needs to be non-NULL to indicate it's + * a valid entry, and it can't be a user's AL instance to prevent using a + * destroyed CEP. + */ + p_cep->p_cid->h_al = gh_al; +#ifdef _DEBUG_ + p_cep->pfn_cb = NULL; +#endif /* _DEBUG_ */ +} + + +static inline void +__calc_timewait( + IN kcep_t* const p_cep ) +{ + + /* + * Use the CEP's stored packet lifetime to calculate the time at which + * the CEP exits timewait. Packet lifetime is expressed as + * 4.096 * 2^pkt_life microseconds, and we need a timeout in 100ns + * increments. The formual using just shifts and subtraction is this: + * timeout = (41943 << (pkt_life - 10)); + * The results are off by .0001%, which should be more than adequate. + */ + if( p_cep->max_2pkt_life > 10 ) + { + p_cep->timewait_time.QuadPart = + -(41943i64 << (p_cep->max_2pkt_life - 10)); + } + else + { + p_cep->timewait_time.QuadPart = + -(41943i64 >> (10 - p_cep->max_2pkt_life)); + } + if( p_cep->target_ack_delay > 10 ) + { + p_cep->timewait_time.QuadPart -= + (41943i64 << (p_cep->target_ack_delay - 10)); + } + else + { + p_cep->timewait_time.QuadPart -= + (41943i64 >> (10 - p_cep->target_ack_delay)); + } +} + + +/* Called with CEP manager and CEP locks held. 
*/ +static inline void +__insert_timewait( + IN kcep_t* const p_cep ) +{ + cl_qlist_insert_tail( &gp_cep_mgr->timewait_list, &p_cep->timewait_item ); + + KeSetTimer( &p_cep->timewait_timer, p_cep->timewait_time, NULL ); + + /* + * Reset the timer for half of the shortest timeout - this results + * in a worst case timeout of 150% of timewait. + */ + cl_timer_trim( &gp_cep_mgr->timewait_timer, + (uint32_t)(-p_cep->timewait_time.QuadPart / 20000) ); +} + + +static inline ib_api_status_t +__do_cep_rej( + IN kcep_t* const p_cep, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + return IB_INSUFFICIENT_RESOURCES; + + status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + return status; + + __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] ); + + status = conn_rej_set_ari( + p_ari, ari_len, (mad_cm_rej_t*)p_mad->p_mad_buf ); + if( status != IB_SUCCESS ) + return status; + + status = conn_rej_set_pdata( + p_pdata, pdata_len, (mad_cm_rej_t*)p_mad->p_mad_buf ); + if( status != IB_SUCCESS ) + return status; + + __reject_mad( p_port_cep, p_cep, p_mad, rej_status ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__cep_get_mad( + IN kcep_t* const p_cep, + IN net16_t attr_id, + OUT cep_agent_t** const pp_port_cep, + OUT ib_mad_element_t** const pp_mad ) +{ + cep_agent_t *p_port_cep; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("__get_cep_agent failed.\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_get_mad returned %s.\n", ib_get_err_str( 
status )) ); + return status; + } + + __format_mad_av( *pp_mad, &p_cep->av[p_cep->idx_primary] ); + + __format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id ); + + *pp_port_cep = p_port_cep; + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__format_dreq( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_dreq_t *p_dreq; + + AL_ENTER( AL_DBG_CM ); + + p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf; + + p_dreq->local_comm_id = p_cep->local_comm_id; + p_dreq->remote_comm_id = p_cep->remote_comm_id; + + conn_dreq_set_remote_qpn( p_cep->remote_qpn, p_dreq ); + + /* copy optional data */ + status = conn_dreq_set_pdata( p_pdata, pdata_len, p_dreq ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__dreq_cep( + IN kcep_t* const p_cep ) +{ + ib_api_status_t status; + cep_agent_t *p_agt; + ib_mad_element_t *p_mad; + + status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_agt, &p_mad ); + if( status != IB_SUCCESS ) + return status; + + status = __format_dreq( p_cep, NULL, 0, p_mad ); + if( status != IB_SUCCESS ) + return status; + + return __cep_send_retry( p_agt, p_cep, p_mad ); +} + + +static ib_api_status_t +__format_drep( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT mad_cm_drep_t* const p_drep ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_drep->local_comm_id = p_cep->local_comm_id; + p_drep->remote_comm_id = p_cep->remote_comm_id; + + /* copy optional data */ + status = conn_drep_set_pdata( p_pdata, pdata_len, p_drep ); + + /* Store the RTU MAD so we can repeat it if we get a repeated DREP. 
*/ + if( status == IB_SUCCESS && p_drep != &p_cep->mads.drep ) + p_cep->mads.drep = *p_drep; + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__drep_cep( + IN kcep_t* const p_cep ) +{ + cep_agent_t *p_agt; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + if( __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_agt, &p_mad ) != IB_SUCCESS ) + return; + + if( __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf ) + != IB_SUCCESS ) + { + return; + } + + __cep_send_mad( p_agt, p_mad ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* Called with CEP manager lock held. */ +static int32_t +__cleanup_cep( + IN kcep_t* const p_cep ) +{ + ib_mad_element_t *p_mad; + kcep_t *p_new_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + /* If we've already come through here, we're done. */ + if( p_cep->state == CEP_STATE_DESTROY || + p_cep->state == CEP_STATE_DREQ_DESTROY ) + { + AL_EXIT( AL_DBG_CM ); + return -1; + } + + /* Cleanup the pending MAD list. */ + while( p_cep->p_mad_head ) + { + p_mad = p_cep->p_mad_head; + p_cep->p_mad_head = p_mad->p_next; + p_mad->p_next = NULL; + if( p_mad->send_context1 ) + { + p_new_cep = (kcep_t* __ptr64)p_mad->send_context1; + + __unbind_cep( p_new_cep ); + __cleanup_cep( p_new_cep ); + } + ib_put_mad( p_mad ); + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_REP_MRA_SENT: + /* Reject the connection. */ + __do_cep_rej( p_cep, IB_REJ_USER_DEFINED, NULL, 0, NULL, 0 ); + break; + + case CEP_STATE_REQ_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* Cancel the send. 
*/ + CL_ASSERT( p_cep->h_mad_svc ); + CL_ASSERT( p_cep->p_send_mad ); + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + /* Reject the connection. */ + __do_cep_rej( p_cep, IB_REJ_TIMEOUT, (uint8_t*)&p_cep->local_ca_guid, + sizeof(p_cep->local_ca_guid), NULL, 0 ); + break; + + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + /* Disconnect the connection. */ + if( __dreq_cep( p_cep ) != IB_SUCCESS ) + break; + /* Fall through. */ + + case CEP_STATE_DREQ_SENT: + p_cep->state = CEP_STATE_DREQ_DESTROY; + AL_EXIT( AL_DBG_CM ); + return cl_atomic_dec( &p_cep->ref_cnt ); + + case CEP_STATE_DREQ_RCVD: + /* Send the DREP. */ + __drep_cep( p_cep ); + break; + + case CEP_STATE_SREQ_RCVD: + /* TODO: Reject the SIDR request. */ + break; + + case CEP_STATE_LISTEN: + /* Remove from listen map. */ + cl_rbmap_remove_item( &gp_cep_mgr->listen_map, &p_cep->listen_item ); + break; + + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_IDLE: + break; + + default: + AL_TRACE( AL_DBG_ERROR, ("CEP in state %d.\n", p_cep->state) ); + case CEP_STATE_TIMEWAIT: + /* Already in timewait - so all is good. */ + p_cep->state = CEP_STATE_DESTROY; + AL_EXIT( AL_DBG_CM ); + return cl_atomic_dec( &p_cep->ref_cnt ); + } + + p_cep->state = CEP_STATE_DESTROY; + __insert_timewait( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return cl_atomic_dec( &p_cep->ref_cnt ); +} + + +static void +__destroy_cep( + IN kcep_t* const p_cep ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( + p_cep->cep.cid < cl_vector_get_size( &gp_cep_mgr->cid_vector ) ); + + CL_ASSERT( p_cep->p_cid == (cep_cid_t*)cl_vector_get_ptr( + &gp_cep_mgr->cid_vector, p_cep->cep.cid ) ); + + /* Free the CID. 
*/ + p_cep->p_cid->p_cep = (kcep_t*)(uintn_t)gp_cep_mgr->free_cid; + p_cep->p_cid->h_al = NULL; + gp_cep_mgr->free_cid = p_cep->cep.cid; + + KeCancelTimer( &p_cep->timewait_timer ); + + ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep ); + + deref_al_obj( &gp_cep_mgr->obj ); + + AL_EXIT( AL_DBG_CM ); +} + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context, + OUT net32_t* const p_cid ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cid ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __create_cep(); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate CEP.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + + __bind_cep( p_cep, h_al, pfn_cb, context ); + + *p_cid = p_cep->cep.cid; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + void *context; + int32_t ref_cnt; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + /* + * Remove the CEP from the CID vector - no further API calls + * will succeed for it. + */ + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + /* Invalid handle. */ + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + context = p_cep->cep.context; + p_cep->pfn_destroy_cb = pfn_destroy_cb; + + /* Cancel any queued IRP */ + __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT ); + + __unbind_cep( p_cep ); + ref_cnt = __cleanup_cep( p_cep ); + KeReleaseInStackQueuedSpinLock( &hdl ); + + /* + * Done waiting. Release the reference so the timewait timer callback + * can finish cleaning up. 
+ */ + if( !ref_cnt && pfn_destroy_cb ) + pfn_destroy_cb( context ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ) +{ + ib_api_status_t status; + kcep_t *p_cep, *p_listen; + cl_rbmap_item_t *p_item, *p_insert_at; + boolean_t left = TRUE; + intn_t cmp; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_listen_info ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_IDLE: + break; + default: + status = IB_INVALID_STATE; + goto done; + } + + /* Insert the CEP into the listen map. */ + p_item = cl_rbmap_root( &gp_cep_mgr->listen_map ); + p_insert_at = p_item; + while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) ) + { + p_insert_at = p_item; + + p_listen = PARENT_STRUCT( p_item, kcep_t, listen_item ); + + if( p_listen_info->svc_id == p_listen->sid ) + goto port_cmp; + + if( p_listen_info->svc_id < p_listen->sid ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else + p_item = cl_rbmap_right( p_item ), left = FALSE; + + continue; + +port_cmp: + if( p_listen_info->port_guid != IB_ALL_PORTS ) + { + if( p_listen_info->port_guid == p_listen->port_guid ) + goto pdata_cmp; + + if( p_listen_info->port_guid < p_listen->port_guid ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else + p_item = cl_rbmap_right( p_item ), left = FALSE; + + continue; + } + +pdata_cmp: + /* + * If an existing listen doesn't have a compare buffer, + * then we found a duplicate. 
+ */ + if( !p_listen->p_cmp_buf ) + break; + + if( p_listen_info->p_cmp_buf ) + { + /* Compare length must match. */ + if( p_listen_info->cmp_len != p_listen->cmp_len ) + break; + + /* Compare offset must match. */ + if( p_listen_info->cmp_offset != p_listen->cmp_offset ) + break; + + cmp = cl_memcmp( &p_listen_info->p_cmp_buf, + p_listen->p_cmp_buf, p_listen->cmp_len ); + + if( cmp < 0 ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( cmp > 0 ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else + break; + + AL_TRACE( AL_DBG_CM, + ("Svc ID match but compare buffer mismatch.\n") ); + continue; + } + } + + if( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) ) + { + /* Duplicate!!! */ + status = IB_INVALID_SETTING; + goto done; + } + + /* Set up the CEP. */ + if( p_listen_info->p_cmp_buf ) + { + p_cep->p_cmp_buf = cl_malloc( p_listen_info->cmp_len ); + if( !p_cep->p_cmp_buf ) + { + AL_TRACE( AL_DBG_ERROR, + ("Failed to allocate compare buffer.\n") ); + status = IB_INSUFFICIENT_MEMORY; + goto done; + } + + cl_memcpy( p_cep->p_cmp_buf, + p_listen_info->p_cmp_buf, p_listen_info->cmp_len ); + } + p_cep->cmp_len = p_listen_info->cmp_len; + p_cep->cmp_offset = p_listen_info->cmp_offset; + p_cep->sid = p_listen_info->svc_id; + p_cep->port_guid = p_listen_info->port_guid; + p_cep->state = CEP_STATE_LISTEN; + + cl_rbmap_insert( &gp_cep_mgr->listen_map, p_insert_at, + &p_cep->listen_item, left ); + + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static cep_agent_t* +__format_path_av( + IN const ib_path_rec_t* const p_path, + OUT kcep_av_t* const p_av ) +{ + cep_agent_t* p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_path ); + CL_ASSERT( p_av ); + + cl_memclr( p_av, sizeof(kcep_av_t) ); + + p_port_cep = __find_port_cep( &p_path->sgid, p_path->slid, + p_path->pkey, &p_av->pkey_index ); + if( !p_port_cep ) + { + AL_EXIT( AL_DBG_CM ); + return NULL; + } + + p_av->port_guid = 
p_port_cep->port_guid; + + p_av->attr.port_num = p_port_cep->port_num; + + p_av->attr.sl = ib_path_rec_sl( p_path ); + p_av->attr.dlid = p_path->dlid; + + p_av->attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, p_path->tclass, ib_path_rec_flow_lbl( p_path ) ); + p_av->attr.grh.hop_limit = ib_path_rec_hop_limit( p_path ); + p_av->attr.grh.src_gid = p_path->sgid; + p_av->attr.grh.dest_gid = p_path->dgid; + + p_av->attr.grh_valid = !ib_gid_is_link_local( &p_path->dgid ); + + p_av->attr.static_rate = ib_path_rec_rate( p_path ); + p_av->attr.path_bits = (uint8_t)(p_path->slid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. + */ + p_av->attr.conn.path_mtu = p_path->mtu; + p_av->attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + ib_path_rec_pkt_life( p_path ) + 1, 0 ); + + AL_EXIT( AL_DBG_CM ); + return p_port_cep; +} + + +/* + * Formats a REQ mad's path information given a path record. 
+ */ +static void +__format_req_path( + IN const ib_path_rec_t* const p_path, + IN const uint8_t ack_delay, + OUT req_path_info_t* const p_req_path ) +{ + AL_ENTER( AL_DBG_CM ); + + p_req_path->local_lid = p_path->slid; + p_req_path->remote_lid = p_path->dlid; + p_req_path->local_gid = p_path->sgid; + p_req_path->remote_gid = p_path->dgid; + + conn_req_path_set_flow_lbl( ib_path_rec_flow_lbl( p_path ), + p_req_path ); + conn_req_path_set_pkt_rate( ib_path_rec_rate( p_path ), + p_req_path ); + + /* Traffic class & hop limit */ + p_req_path->traffic_class = p_path->tclass; + p_req_path->hop_limit = ib_path_rec_hop_limit( p_path ); + + /* SL & Subnet Local fields */ + conn_req_path_set_svc_lvl( ib_path_rec_sl( p_path ), + p_req_path ); + conn_req_path_set_subn_lcl( + ib_gid_is_link_local( &p_path->dgid ), p_req_path ); + + conn_req_path_set_lcl_ack_timeout( + calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_path ) + 1, + ack_delay ), p_req_path ); + + conn_req_path_clr_rsvd_fields( p_req_path ); + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__format_req( + IN kcep_t* const p_cep, + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + mad_cm_req_t* p_req; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_cm_req ); + CL_ASSERT( p_cep->p_mad ); + + /* Format the MAD header. */ + __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REQ_ATTR_ID ); + + /* Set the addressing information in the MAD. */ + __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_req = (mad_cm_req_t*)p_cep->p_mad->p_mad_buf; + + ci_ca_lock_attr( p_cm_req->h_qp->obj.p_ci_ca ); + /* + * Store the local CA's ack timeout for use when computing + * the local ACK timeout. + */ + p_cep->local_ack_delay = + p_cm_req->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay; + ci_ca_unlock_attr( p_cm_req->h_qp->obj.p_ci_ca ); + + /* Format the primary path. 
*/ + __format_req_path( p_cm_req->p_primary_path, + p_cep->local_ack_delay, &p_req->primary_path ); + + if( p_cm_req->p_alt_path ) + { + /* Format the alternate path. */ + __format_req_path( p_cm_req->p_alt_path, + p_cep->local_ack_delay, &p_req->alternate_path ); + } + else + { + cl_memclr( &p_req->alternate_path, sizeof(req_path_info_t) ); + } + + /* Set the local communication in the REQ. */ + p_req->local_comm_id = p_cep->local_comm_id; + p_req->sid = p_cm_req->svc_id; + p_req->local_ca_guid = p_cm_req->h_qp->obj.p_ci_ca->verbs.guid; + + conn_req_set_lcl_qpn( p_cep->local_qpn, p_req ); + conn_req_set_resp_res( p_cm_req->resp_res, p_req ); + conn_req_set_init_depth( p_cm_req->init_depth, p_req ); + conn_req_set_remote_resp_timeout( p_cm_req->remote_resp_timeout, p_req ); + conn_req_set_qp_type( p_cm_req->h_qp->type, p_req ); + conn_req_set_flow_ctrl( p_cm_req->flow_ctrl, p_req ); + conn_req_set_starting_psn( p_cep->rq_psn, p_req ); + + conn_req_set_lcl_resp_timeout( p_cm_req->local_resp_timeout, p_req ); + conn_req_set_retry_cnt( p_cm_req->retry_cnt, p_req ); + + p_req->pkey = p_cm_req->p_primary_path->pkey; + + conn_req_set_mtu( ib_path_rec_mtu( p_cm_req->p_primary_path ), p_req ); + conn_req_set_rnr_retry_cnt( p_cm_req->rnr_retry_cnt, p_req ); + + conn_req_set_max_cm_retries( p_cm_req->max_cm_retries, p_req ); + status = conn_req_set_pdata( + p_cm_req->p_req_pdata, p_cm_req->req_length, p_req ); + + conn_req_clr_rsvd_fields( p_req ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__save_user_req( + IN kcep_t* const p_cep, + IN const ib_cm_req_t* const p_cm_req, + OUT cep_agent_t** const pp_port_cep ) +{ + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_req->p_primary_path ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid primary path record.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->sid = p_cm_req->svc_id; + + p_cep->idx_primary = 0; + + p_cep->p2p = (p_cm_req->pfn_cm_req_cb != NULL); + + if( 
p_cm_req->p_compare_buffer ) + { + if( !p_cm_req->compare_length || + (p_cm_req->compare_offset + p_cm_req->compare_length) > + IB_REQ_PDATA_SIZE ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_SETTING; + } + p_cep->p_cmp_buf = cl_malloc( p_cm_req->compare_length ); + if( !p_cep->p_cmp_buf ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INSUFFICIENT_MEMORY; + } + + cl_memcpy( p_cep->p_cmp_buf, + p_cm_req->p_compare_buffer, p_cm_req->compare_length ); + + p_cep->cmp_len = p_cm_req->compare_length; + p_cep->cmp_offset = p_cm_req->compare_offset; + } + else + { + p_cep->p_cmp_buf = NULL; + p_cep->cmp_len = 0; + p_cep->cmp_offset = 0; + } + p_cep->was_active = TRUE; + + /* Validate the primary path. */ + p_port_cep = __format_path_av( p_cm_req->p_primary_path, &p_cep->av[0] ); + if( !p_port_cep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Primary path unrealizable.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->av[0].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt; + + /* Make sure the paths will work on the desired QP. */ + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != + p_cm_req->h_qp->obj.p_ci_ca->verbs.guid ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Primary path not realizable on given QP.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid; + + *pp_port_cep = p_port_cep; + + /* + * Store the PKEY so we can ensure that alternate paths are + * on the same partition. + */ + p_cep->pkey = p_cm_req->p_primary_path->pkey; + + p_cep->max_2pkt_life = ib_path_rec_pkt_life( p_cm_req->p_primary_path ) + 1; + + if( p_cm_req->p_alt_path ) + { + /* MTUs must match since they are specified only once. */ + if( ib_path_rec_mtu( p_cm_req->p_primary_path ) != + ib_path_rec_mtu( p_cm_req->p_alt_path ) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Mismatched primary and alternate path MTUs.\n") ); + return IB_INVALID_SETTING; + } + + /* The PKEY must match too. 
*/ + if( p_cm_req->p_alt_path->pkey != p_cm_req->p_primary_path->pkey ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Mismatched primary and alternate PKEYs.\n") ); + return IB_INVALID_SETTING; + } + + p_port_cep = + __format_path_av( p_cm_req->p_alt_path, &p_cep->av[1] ); + if( p_port_cep && + p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + /* Alternate path is not on same CA. */ + AL_TRACE_EXIT( AL_DBG_ERROR, ("Alternate path unrealizable.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->av[1].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt; + + p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, + (ib_path_rec_pkt_life( p_cm_req->p_alt_path ) + 1) ); + } + else + { + cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) ); + } + + p_cep->p_cid->modifier++; + /* + * We don't ever want a modifier of zero for the CID at index zero + * since it would result in a total CID of zero. + */ + if( !p_cep->cep.cid && !p_cep->p_cid->modifier ) + p_cep->p_cid->modifier++; + + /* Store pertinent information in the connection. */ + p_cep->local_comm_id = p_cep->cep.cid | (p_cep->p_cid->modifier << 24); + p_cep->remote_comm_id = 0; + + /* Cache the local QPN. */ + p_cep->local_qpn = p_cm_req->h_qp->num; + p_cep->remote_ca_guid = 0; + p_cep->remote_qpn = 0; + + /* Retry timeout is remote CM response timeout plus 2 * packet life. */ + p_cep->retry_timeout = __calc_mad_timeout( p_cep->max_2pkt_life ) + + __calc_mad_timeout( p_cm_req->remote_resp_timeout ); + + + /* Store the retry count. */ + p_cep->max_cm_retries = p_cm_req->max_cm_retries; + + /* + * Clear the maximum packet lifetime, used to calculate timewait. + * It will be set when we transition into the established state. 
+ */ + p_cep->timewait_time.QuadPart = 0; + + p_cep->rq_psn = p_cep->local_qpn; + + p_cep->rnr_nak_timeout = p_cm_req->rnr_nak_timeout; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_pre_req( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_req_t* const p_cm_req, + OUT ib_qp_mod_t* const p_init ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_req ); + CL_ASSERT( p_init ); + + /* TODO: Code P2P support. */ + if( p_cm_req->pfn_cm_req_cb ) + { + AL_EXIT( AL_DBG_CM ); + return IB_UNSUPPORTED; + } + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_IDLE: + status = __save_user_req( p_cep, p_cm_req, &p_port_cep ); + if( status != IB_SUCCESS ) + break; + + status = + ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_req( p_cep, p_cm_req ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid pdata length.\n") ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + /* Format the INIT qp modify attributes. 
*/ + p_init->req_state = IB_QPS_INIT; + p_init->state.init.primary_port = + p_cep->av[p_cep->idx_primary].attr.port_num; + p_init->state.init.qkey = 0; + p_init->state.init.pkey_index = + p_cep->av[p_cep->idx_primary].pkey_index; + p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE; + + p_cep->state = CEP_STATE_PRE_REQ; + break; + + case CEP_STATE_TIMEWAIT: + status = IB_QP_IN_TIMEWAIT; + break; + + default: + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->state = CEP_STATE_IDLE; + status = IB_INVALID_SETTING; + } + else + { + status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad ); + + if( status == IB_SUCCESS ) + p_cep->state = CEP_STATE_REQ_SENT; + else + p_cep->state = CEP_STATE_IDLE; + } + p_cep->p_mad = NULL; + break; + + default: + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__save_user_rep( + IN kcep_t* const p_cep, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Cache the local QPN. */ + p_cep->local_qpn = p_cm_rep->h_qp->num; + p_cep->rq_psn = p_cep->local_qpn; + p_cep->init_depth = p_cm_rep->init_depth; + + ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + /* Check the CA's responder resource max and trim if necessary. 
*/ + if( (p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res < + p_cep->req_init_depth) ) + { + /* + * The CA cannot handle the requested responder resources. + * Set the response to the CA's maximum. + */ + p_cep->resp_res = + p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res; + } + else + { + /* The CA supports the requested responder resources. */ + p_cep->resp_res = p_cep->req_init_depth; + } + ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + + p_cep->rnr_nak_timeout = p_cm_rep->rnr_nak_timeout; + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__format_rep( + IN kcep_t* const p_cep, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + ib_api_status_t status; + mad_cm_rep_t *p_rep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_cm_rep ); + CL_ASSERT( p_cep->p_mad ); + + /* Format the MAD header. */ + __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REP_ATTR_ID ); + + /* Set the addressing information in the MAD. */ + __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_rep = (mad_cm_rep_t*)p_cep->p_mad->p_mad_buf; + + p_rep->local_comm_id = p_cep->local_comm_id; + p_rep->remote_comm_id = p_cep->remote_comm_id; + conn_rep_set_lcl_qpn( p_cep->local_qpn, p_rep ); + conn_rep_set_starting_psn( p_cep->rq_psn, p_rep ); + + if( p_cm_rep->failover_accepted != IB_FAILOVER_ACCEPT_SUCCESS ) + { + /* + * Failover rejected - clear the alternate AV information. + * Note that at this point, the alternate is always at index 1. + */ + cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) ); + } + else if( !p_cep->av[1].port_guid ) + { + /* + * Always reject alternate path if it's zero. We might + * have cleared the AV because it was unrealizable when + * processing the REQ. 
+ */ + conn_rep_set_failover( IB_FAILOVER_ACCEPT_ERROR, p_rep ); + } + else + { + conn_rep_set_failover( p_cm_rep->failover_accepted, p_rep ); + } + + p_rep->resp_resources = p_cep->resp_res; + + ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + conn_rep_set_target_ack_delay( + p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay, p_rep ); + ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + + p_rep->initiator_depth = p_cep->init_depth; + + conn_rep_set_e2e_flow_ctl( p_cm_rep->flow_ctrl, p_rep ); + + conn_rep_set_rnr_retry_cnt( + (uint8_t)(p_cm_rep->rnr_retry_cnt & 0x07), p_rep ); + + /* Local CA guid should have been set when processing the received REQ. */ + CL_ASSERT( p_cep->local_ca_guid ); + p_rep->local_ca_guid = p_cep->local_ca_guid; + + status = conn_rep_set_pdata( + p_cm_rep->p_rep_pdata, p_cm_rep->rep_length, p_rep ); + + conn_rep_clr_rsvd_fields( p_rep ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void *context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_rep ); + CL_ASSERT( p_init ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. 
*/ + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + CL_ASSERT( !p_cep->p_mad ); + status = + __cep_get_mad( p_cep, CM_REP_ATTR_ID, &p_port_cep, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + __save_user_rep( p_cep, p_cm_rep ); + + status = __format_rep( p_cep, p_cm_rep ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + /* Format the INIT qp modify attributes. */ + p_init->req_state = IB_QPS_INIT; + p_init->state.init.primary_port = + p_cep->av[p_cep->idx_primary].attr.port_num; + p_init->state.init.qkey = 0; + p_init->state.init.pkey_index = + p_cep->av[p_cep->idx_primary].pkey_index; + p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE; + + p_cep->cep.context = context; + + /* Just OR in the PREP bit into the state. */ + p_cep->state |= CEP_STATE_PREP; + break; + + default: + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->state = CEP_STATE_IDLE; + status = IB_INSUFFICIENT_RESOURCES; + } + else + { + status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad ); + if( status == IB_SUCCESS ) + { + p_cep->state = CEP_STATE_REP_SENT; + } + else + { + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + } + } + p_cep->p_mad = NULL; + break; + + default: + 
status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static inline ib_api_status_t +__format_rtu( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rtu_t *p_rtu; + + AL_ENTER( AL_DBG_CM ); + + p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf; + + p_rtu->local_comm_id = p_cep->local_comm_id; + p_rtu->remote_comm_id = p_cep->remote_comm_id; + + /* copy optional data */ + status = conn_rtu_set_pdata( p_pdata, pdata_len, p_rtu ); + + /* Store the RTU MAD so we can repeat it if we get a repeated REP. */ + if( status == IB_SUCCESS ) + p_cep->mads.rtu = *p_rtu; + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + status = __cep_get_mad( p_cep, CM_RTU_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_rtu( p_cep, p_pdata, pdata_len, p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + break; + } + + /* Update the timewait time. */ + __calc_timewait( p_cep ); + + p_cep->state = CEP_STATE_ESTABLISHED; + + __cep_send_mad( p_port_cep, p_mad ); + /* Send failures will get another chance if we receive a repeated REP. 
*/ + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + status = __do_cep_rej( + p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len ); + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + status = __do_cep_rej( + p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + status = IB_INVALID_STATE; + } + + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__format_mra( + IN kcep_t* const p_cep, + IN const uint8_t msg_mraed, + IN const ib_cm_mra_t* const p_cm_mra, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_mra_t *p_mra; + + AL_ENTER( AL_DBG_CM ); + + p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf; + + conn_mra_set_msg_mraed( msg_mraed, p_mra ); + + p_mra->local_comm_id = p_cep->local_comm_id; + p_mra->remote_comm_id = p_cep->remote_comm_id; + + conn_mra_set_svc_timeout( p_cm_mra->svc_timeout, p_mra ); + status = conn_mra_set_pdata( + p_cm_mra->p_mra_pdata, p_cm_mra->mra_length, p_mra ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CM ); + return status; 
+ } + conn_mra_clr_rsvd_fields( p_mra ); + + /* Save the MRA so we can repeat it if we get a repeated message. */ + p_cep->mads.mra = *p_mra; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + uint8_t msg_mraed; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_mra ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_PRE_REP: + msg_mraed = 0; + break; + + case CEP_STATE_REP_RCVD: + msg_mraed = 1; + break; + + case CEP_STATE_PRE_APR: + case CEP_STATE_LAP_RCVD: + msg_mraed = 2; + break; + + default: + status = IB_INVALID_STATE; + goto done; + } + + status = __cep_get_mad( p_cep, CM_MRA_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + goto done; + + status = __format_mra( p_cep, msg_mraed, p_cm_mra, p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + goto done; + } + + p_cep->state |= CEP_STATE_MRA; + + __cep_send_mad( p_port_cep, p_mad ); + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + + +static ib_api_status_t +__format_lap( + IN kcep_t* const p_cep, + IN const ib_cm_lap_t* const p_cm_lap, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_lap_t *p_lap; + + AL_ENTER( AL_DBG_CM ); + + __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_LAP_ATTR_ID ); + + __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf; + + p_lap->alternate_path.local_lid = p_cm_lap->p_alt_path->slid; + 
p_lap->alternate_path.remote_lid = p_cm_lap->p_alt_path->dlid; + p_lap->alternate_path.local_gid = p_cm_lap->p_alt_path->sgid; + p_lap->alternate_path.remote_gid = p_cm_lap->p_alt_path->dgid; + + /* Set Flow Label and Packet Rate */ + conn_lap_path_set_flow_lbl( + ib_path_rec_flow_lbl( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + conn_lap_path_set_tclass( + p_cm_lap->p_alt_path->tclass, &p_lap->alternate_path ); + + p_lap->alternate_path.hop_limit = + ib_path_rec_hop_limit( p_cm_lap->p_alt_path ); + conn_lap_path_set_pkt_rate( + ib_path_rec_rate( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + + /* Set SL and Subnet Local */ + conn_lap_path_set_svc_lvl( + ib_path_rec_sl( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + conn_lap_path_set_subn_lcl( + ib_gid_is_link_local( &p_cm_lap->p_alt_path->dgid ), + &p_lap->alternate_path ); + + conn_lap_path_set_lcl_ack_timeout( + calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1, + p_cep->local_ack_delay), &p_lap->alternate_path ); + + conn_lap_path_clr_rsvd_fields( &p_lap->alternate_path ); + + p_lap->local_comm_id = p_cep->local_comm_id; + p_lap->remote_comm_id = p_cep->remote_comm_id; + conn_lap_set_remote_qpn( p_cep->remote_qpn, p_lap ); + conn_lap_set_resp_timeout( p_cm_lap->remote_resp_timeout, p_lap ); + + status = conn_lap_set_pdata( + p_cm_lap->p_lap_pdata, p_cm_lap->lap_length, p_lap ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("lap pdata invalid.\n") ); + return status; + } + + conn_lap_clr_rsvd_fields( p_lap ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_lap ); + CL_ASSERT( p_cm_lap->p_alt_path ); + + KeAcquireInStackQueuedSpinLock( 
&gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + if( !p_cep->was_active ) + { + /* Only the side that took the active role can initiate a LAP. */ + AL_TRACE( AL_DBG_ERROR, + ("Only the active side of a connection can initiate a LAP.\n") ); + status = IB_INVALID_STATE; + break; + } + + /* + * Format the AV information - store in the temporary location until we + * get the APR indicating acceptance. + */ + p_port_cep = __format_path_av( p_cm_lap->p_alt_path, &p_cep->alt_av ); + if( !p_port_cep ) + { + AL_TRACE( AL_DBG_ERROR, ("Alternate path invalid!\n") ); + status = IB_INVALID_SETTING; + break; + } + + p_cep->alt_av.attr.conn.seq_err_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt; + p_cep->alt_av.attr.conn.rnr_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt; + + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_TRACE( AL_DBG_ERROR, + ("Alternate CA GUID different from current!\n") ); + status = IB_INVALID_SETTING; + break; + } + + /* Store the alternate path info temporarily. 
*/ + p_cep->alt_2pkt_life = ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1; + + status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_lap( p_cep, p_cm_lap, p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __cep_send_retry( p_port_cep, p_cep, p_mad ); + if( status == IB_SUCCESS ) + p_cep->state = CEP_STATE_LAP_SENT; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__format_apr( + IN kcep_t* const p_cep, + IN const ib_cm_apr_t* const p_cm_apr, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_apr_t *p_apr; + + AL_ENTER( AL_DBG_CM ); + + p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf; + + p_apr->local_comm_id = p_cep->local_comm_id; + p_apr->remote_comm_id = p_cep->remote_comm_id; + p_apr->status = p_cm_apr->apr_status; + + status = conn_apr_set_apr_info( p_cm_apr->p_info->data, + p_cm_apr->info_length, p_apr ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("apr_info invalid\n") ); + return status; + } + + status = conn_apr_set_pdata( p_cm_apr->p_apr_pdata, + p_cm_apr->apr_length, p_apr ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("apr pdata invalid\n") ); + return status; + } + + conn_apr_clr_rsvd_fields( p_apr ); + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_apr ); + CL_ASSERT( p_apr ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + 
return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + CL_ASSERT( !p_cep->p_mad ); + status = __cep_get_mad( p_cep, CM_APR_ATTR_ID, &p_port_cep, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_apr( p_cep, p_cm_apr, p_cep->p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + if( !p_cm_apr->apr_status ) + { + /* + * Copy the temporary AV and port GUID information into + * the alternate path. + */ + p_cep->av[((p_cep->idx_primary + 1) & 0x1)] = p_cep->alt_av; + + /* Update our maximum packet lifetime. */ + p_cep->max_2pkt_life = + max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life ); + + /* Update our timewait time. */ + __calc_timewait( p_cep ); + + /* Fill in the QP attributes. 
*/ + cl_memclr( p_apr, sizeof(ib_qp_mod_t) ); + p_apr->req_state = IB_QPS_RTS; + p_apr->state.rts.opts = + IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE; + p_apr->state.rts.alternate_av = p_cep->alt_av.attr; + p_apr->state.rts.apm_state = IB_APM_REARM; + } + + p_cep->state |= CEP_STATE_PREP; + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_apr( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + status = IB_INSUFFICIENT_RESOURCES; + } + else + { + p_cep->state = CEP_STATE_ESTABLISHED; + + __cep_send_mad( p_port_cep, p_cep->p_mad ); + status = IB_SUCCESS; + } + p_cep->p_mad = NULL; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata, + IN const uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return 
IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_LAP_MRA_RCVD: + status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad ); + if( status != IB_SUCCESS ) + { + AL_TRACE( AL_DBG_ERROR, + ("__format_dreq returned %s.\n", ib_get_err_str( status )) ); + break; + } + + if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS ) + { + p_cep->state = CEP_STATE_DREQ_SENT; + } + else + { + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + } + + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_drep ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_DREQ_RCVD: + status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_drep( p_cep, p_cm_drep->p_drep_pdata, + p_cm_drep->drep_length, (mad_cm_drep_t*)p_mad->p_mad_buf ); + if( status != IB_SUCCESS ) + break; + + __cep_send_mad( p_port_cep, p_mad ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + 
return status; +} + + +ib_api_status_t +al_cep_migrate( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_LAP_MRA_RCVD: + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_cep->idx_primary++; + p_cep->idx_primary &= 0x1; + status = IB_SUCCESS; + break; + } + + AL_TRACE( AL_DBG_ERROR, ("No alternate path avaialble.\n") ); + + /* Fall through. */ + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_established( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + CL_ASSERT( p_cep->p_send_mad ); + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + p_cep->state = CEP_STATE_ESTABLISHED; + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rtr ) +{ + ib_api_status_t status; + 
kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_rtr ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_ESTABLISHED: + cl_memclr( p_rtr, sizeof(ib_qp_mod_t) ); + p_rtr->req_state = IB_QPS_RTR; + + /* Required params. */ + p_rtr->state.rtr.rq_psn = p_cep->rq_psn; + p_rtr->state.rtr.dest_qp = p_cep->remote_qpn; + p_rtr->state.rtr.primary_av = p_cep->av[p_cep->idx_primary].attr; + p_rtr->state.rtr.resp_res = p_cep->resp_res; + p_rtr->state.rtr.rnr_nak_timeout = p_cep->rnr_nak_timeout; + + /* Optional params. */ + p_rtr->state.rtr.opts = 0; + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_rtr->state.rtr.opts |= IB_MOD_QP_ALTERNATE_AV; + p_rtr->state.rtr.alternate_av = + p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr; + } + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_rts ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_REQ_MRA_RCVD: + 
case CEP_STATE_REP_SENT: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + case CEP_STATE_ESTABLISHED: + cl_memclr( p_rts, sizeof(ib_qp_mod_t) ); + p_rts->req_state = IB_QPS_RTS; + + /* Required params. */ + p_rts->state.rts.sq_psn = p_cep->sq_psn; + p_rts->state.rts.retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt; + p_rts->state.rts.rnr_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt; + p_rts->state.rts.local_ack_timeout = + p_cep->av[p_cep->idx_primary].attr.conn.local_ack_timeout; + p_rts->state.rts.init_depth = p_cep->init_depth; + + /* Optional params. */ + p_rts->state.rts.opts = 0; + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_rts->state.rts.opts = + IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE; + p_rts->state.rts.apm_state = IB_APM_REARM; + p_rts->state.rts.alternate_av = + p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr; + } + status = IB_SUCCESS; + break; + + default: + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + *p_timewait_us = p_cep->timewait_time.QuadPart / 10; + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN OUT ib_cep_t* const p_new_cep, + OUT ib_mad_element_t** const pp_mad ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + 
+ AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_new_cep ); + CL_ASSERT( pp_mad ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cep->p_mad_head ) + { + p_cep->signalled = FALSE; + status = IB_NOT_DONE; + goto done; + } + + /* Set the MAD. */ + *pp_mad = p_cep->p_mad_head; + p_cep->p_mad_head = p_cep->p_mad_head->p_next; + (*pp_mad)->p_next = NULL; + + /* We're done with the input CEP. Reuse the variable */ + p_cep = (kcep_t* __ptr64)(*pp_mad)->send_context1; + if( p_cep ) + { + *p_new_cep = p_cep->cep; + } + else + { + p_new_cep->context = NULL; + p_new_cep->cid = AL_INVALID_CID; + } + + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__cep_cancel_irp( + IN DEVICE_OBJECT* p_dev_obj, + IN IRP* p_irp ) +{ + net32_t cid; + ib_al_handle_t h_al; + KLOCK_QUEUE_HANDLE hdl; + kcep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_dev_obj ); + CL_ASSERT( p_irp ); + + cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0]; + h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1]; + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( p_cep ) + __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT ); + + KeReleaseInStackQueuedSpinLock( &hdl ); + + IoReleaseCancelSpinLock( p_irp->CancelIrql ); + + AL_EXIT( AL_DBG_CM ); +} + + +NTSTATUS +al_cep_queue_irp( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN IRP* const p_irp ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_irp ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( 
+	 * Store the CID and AL handle in the IRP's driver context
+ */ + cid = PARENT_STRUCT( p_item, kcep_t, al_item )->cep.cid; + cl_spinlock_release( &h_al->obj.lock ); + al_destroy_cep( h_al, cid, NULL ); + cl_spinlock_acquire( &h_al->obj.lock ); + } + cl_spinlock_release( &h_al->obj.lock ); + + AL_EXIT( AL_DBG_CM ); +} diff --git a/trunk/core/al/kernel/al_dev.c b/trunk/core/al/kernel/al_dev.c index 4ed4695e..f79702b2 100644 --- a/trunk/core/al/kernel/al_dev.c +++ b/trunk/core/al/kernel/al_dev.c @@ -250,15 +250,7 @@ __proxy_cleanup_map( IN al_dev_open_context_t *p_context ) { al_handle_t *p_h; - ib_cm_handle_t h_cm; - union _u - { - ib_cm_rej_t cm_rej; - ib_cm_drep_t cm_drep; - ib_cm_apr_t cm_apr; - } u; size_t i; - uint32_t cm_subtype; CL_ENTER( AL_DBG_DEV, g_al_dbg_lvl ); @@ -282,40 +274,6 @@ __proxy_cleanup_map( al_hdl_free( p_context->h_al, i ); break; - case AL_OBJ_TYPE_H_CONN: - h_cm = (ib_cm_handle_t)p_h->p_obj; - cm_subtype = AL_SUBTYPE( p_h->type ); - al_hdl_free( p_context->h_al, i ); - cl_spinlock_release( &p_context->h_al->obj.lock ); - switch( cm_subtype ) - { - case AL_OBJ_SUBTYPE_REQ: - case AL_OBJ_SUBTYPE_REP: - /* Reject any outstanding connections. */ - cl_memclr( &u.cm_rej, sizeof( ib_cm_rej_t ) ); - u.cm_rej.rej_status = IB_REJ_TIMEOUT; - ib_cm_rej( h_cm, &u.cm_rej ); - break; - - case AL_OBJ_SUBTYPE_DREQ: - /* Issue a disconnect reply to any requests. */ - cl_memclr( &u.cm_drep, sizeof( ib_cm_drep_t ) ); - ib_cm_drep( h_cm, &u.cm_drep ); - break; - - case AL_OBJ_SUBTYPE_LAP: - /* Reject the LAP. 
*/ - cl_memclr( &u.cm_apr, sizeof( ib_cm_apr_t ) ); - u.cm_apr.apr_status = IB_AP_REJECT; - ib_cm_apr( h_cm, &u.cm_apr ); - break; - - default: - break; - } - cl_spinlock_acquire( &p_context->h_al->obj.lock ); - break; - case AL_OBJ_TYPE_H_SA_REQ: al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj ); break; @@ -475,8 +433,10 @@ al_dev_ioctl( cl_status = proxy_ioctl( h_ioctl, &ret_bytes ); else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) cl_status = verbs_ioctl( h_ioctl, &ret_bytes ); - else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) - cl_status = cm_ioctl( h_ioctl, &ret_bytes ); + //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + // cl_status = cm_ioctl( h_ioctl, &ret_bytes ); + else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = cep_ioctl( h_ioctl, &ret_bytes ); else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) cl_status = al_ioctl( h_ioctl, &ret_bytes ); else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) diff --git a/trunk/core/al/kernel/al_ioc_pnp.c b/trunk/core/al/kernel/al_ioc_pnp.c index dce54049..a017b659 100644 --- a/trunk/core/al/kernel/al_ioc_pnp.c +++ b/trunk/core/al/kernel/al_ioc_pnp.c @@ -257,17 +257,18 @@ typedef struct _iou_node *********/ +#pragma warning(disable:4324) typedef struct _iou_ioc { cl_map_item_t map_item; iou_node_t *p_iou; uint8_t slot; - uint8_t pad[7]; /* Align IOC profile on 64-bit boundary */ ib_ioc_profile_t profile; uint8_t num_valid_entries; ib_svc_entry_t *p_svc_entries; } iou_ioc_t; +#pragma warning(default:4324) typedef enum _sweep_state diff --git a/trunk/core/al/kernel/al_mad_pool.c b/trunk/core/al/kernel/al_mad_pool.c index 075a8e6c..2ca54696 100644 --- a/trunk/core/al/kernel/al_mad_pool.c +++ b/trunk/core/al/kernel/al_mad_pool.c @@ -733,6 +733,7 @@ get_mad_send( p_mad_send->mad_send.h_av = NULL; p_mad_send->mad_send.retry_cnt = 0; p_mad_send->mad_send.retry_time = 0; + p_mad_send->mad_send.delay = 0; p_mad_send->h_pool = p_mad_item->pool_key->h_pool; ref_al_obj( 
&p_mad_item->pool_key->h_pool->obj ); diff --git a/trunk/core/al/kernel/al_mgr.c b/trunk/core/al/kernel/al_mgr.c index 27216049..a911f6c7 100644 --- a/trunk/core/al/kernel/al_mgr.c +++ b/trunk/core/al/kernel/al_mgr.c @@ -40,7 +40,7 @@ #include #include "al.h" -#include "al_cm.h" +#include "al_cm_cep.h" #include "al_debug.h" #include "al_dm.h" #include "al_mad_pool.h" @@ -179,7 +179,7 @@ create_al_mgr() } /* Initialize CM */ - status = create_cm( &gp_al_mgr->obj ); + status = create_cep_mgr( &gp_al_mgr->obj ); if( status != IB_SUCCESS ) { gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); @@ -394,7 +394,7 @@ ib_open_al( cl_qlist_init( &h_al->mad_list ); cl_qlist_init( &h_al->key_list ); cl_qlist_init( &h_al->query_list ); - cl_qlist_init( &h_al->conn_list ); + cl_qlist_init( &h_al->cep_list ); cl_vector_construct( &h_al->hdl_vector ); @@ -626,92 +626,3 @@ al_hdl_get( return p_obj; } - -al_conn_t* -al_hdl_ref_conn( - IN const ib_al_handle_t h_al, - IN const uint64_t hdl, - IN const uint32_t sub_type ) -{ - al_handle_t *p_h; - al_conn_t *p_conn; - - cl_spinlock_acquire( &h_al->obj.lock ); - - /* Validate index. */ - if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) ) - { - cl_spinlock_release( &h_al->obj.lock ); - return NULL; - } - - /* Get the specified entry. */ - p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); - - /* - * Make sure that the handle is valid and the correct type. Note that we - * support having multiple possible subtypes provided, and check against - * any of them. 
- */ - if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) || - ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) ) - { - cl_spinlock_release( &h_al->obj.lock ); - return NULL; - } - - p_conn = (al_conn_t*)p_h->p_obj; - - __ref_conn( p_conn ); - - cl_spinlock_release( &h_al->obj.lock ); - return p_conn; -} - - -al_conn_t* -al_hdl_get_conn( - IN const ib_al_handle_t h_al, - IN const uint64_t hdl, - IN const uint32_t sub_type ) -{ - al_handle_t *p_h; - al_conn_t *p_conn; - - cl_spinlock_acquire( &h_al->obj.lock ); - - /* Validate index. */ - if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) ) - { - cl_spinlock_release( &h_al->obj.lock ); - return NULL; - } - - /* Get the specified entry. */ - p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); - - /* - * Make sure that the handle is valid and the correct type. Note that we - * support having multiple possible subtypes provided, and check against - * any of them. - */ - if( (AL_BASE_TYPE( p_h->type ) != AL_OBJ_TYPE_H_CONN) || - ((AL_SUBTYPE( p_h->type ) & sub_type) == 0) ) - { - cl_spinlock_release( &h_al->obj.lock ); - return NULL; - } - - p_conn = (al_conn_t*)p_h->p_obj; - - /* Clear the entry. 
*/ - p_h->type = AL_OBJ_TYPE_UNKNOWN; - p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl; - h_al->free_hdl = hdl; - - __ref_conn( p_conn ); - - cl_spinlock_release( &h_al->obj.lock ); - return p_conn; -} - diff --git a/trunk/core/al/kernel/al_proxy.c b/trunk/core/al/kernel/al_proxy.c index 975bdb93..9e7b4641 100644 --- a/trunk/core/al/kernel/al_proxy.c +++ b/trunk/core/al/kernel/al_proxy.c @@ -1119,8 +1119,8 @@ proxy_rearm_pnp( return CL_INVALID_PARAMETER; } - if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_in_t) || - cl_ioctl_out_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_out_t) ) + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_in_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_out_t) ) { AL_EXIT( AL_DBG_DEV | AL_DBG_PNP ); return CL_INVALID_PARAMETER; diff --git a/trunk/core/al/kernel/al_proxy_cep.c b/trunk/core/al/kernel/al_proxy_cep.c new file mode 100644 index 00000000..69d22cd1 --- /dev/null +++ b/trunk/core/al/kernel/al_proxy_cep.c @@ -0,0 +1,940 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "al_debug.h" +#include "al_cm_cep.h" +#include "al_dev.h" +#include +#include "al_proxy.h" +#include "al.h" +#include "al_qp.h" + + +static cl_status_t +proxy_create_cep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_create_cep_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_create_cep_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_out_size( h_ioctl ) != sizeof(ual_create_cep_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* We use IRPs as notification mechanism so the callback is NULL. */ + p_ioctl->status = al_create_cep( p_context->h_al, NULL, + p_context, &p_ioctl->cid ); + + *p_ret_bytes = sizeof(ual_create_cep_ioctl_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static inline void +__complete_get_event_ioctl( + IN ib_al_handle_t h_al, + IN IRP* const p_irp, + IN NTSTATUS status ) +{ +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + + /* Complete the IRP. */ + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT ); + + deref_al_obj( &h_al->obj ); +} + + +static cl_status_t +proxy_destroy_cep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + al_destroy_cep( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_listen( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_listen_ioctl_t *p_ioctl; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_listen_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_listen_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the private data compare buffer to our kernel copy. */ + if( p_ioctl->cep_listen.p_cmp_buf ) + p_ioctl->cep_listen.p_cmp_buf = p_ioctl->compare; + + status = + al_cep_listen( p_context->h_al, p_ioctl->cid, &p_ioctl->cep_listen ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_req_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_req_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_req_ioctl_out); + + p_ioctl->in.cm_req.h_al = p_context->h_al; + p_ioctl->in.cm_req.p_primary_path = &p_ioctl->in.paths[0]; + if( p_ioctl->in.cm_req.p_alt_path ) + p_ioctl->in.cm_req.p_alt_path = &p_ioctl->in.paths[1]; + if( p_ioctl->in.cm_req.p_compare_buffer ) + p_ioctl->in.cm_req.p_compare_buffer = p_ioctl->in.compare; + if( p_ioctl->in.cm_req.p_req_pdata ) + p_ioctl->in.cm_req.p_req_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. */ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->in.cm_req.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->in.cm_req.h_qp = h_qp; + + p_ioctl->out.status = al_cep_pre_req( p_context->h_al, p_ioctl->in.cid, + &p_ioctl->in.cm_req, &p_ioctl->out.init ); + + deref_al_obj( &h_qp->obj ); + + if( p_ioctl->out.status != IB_SUCCESS ) + { +done: + cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_send_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_req( + p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_rep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rep_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rep_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_rep_ioctl_out); + + if( p_ioctl->in.cm_rep.p_rep_pdata ) + p_ioctl->in.cm_rep.p_rep_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. 
*/ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->in.cm_rep.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->in.cm_rep.h_qp = h_qp; + + p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid, + p_context, &p_ioctl->in.cm_rep, &p_ioctl->out.init ); + + deref_al_obj( &h_qp->obj ); + + if( p_ioctl->out.status != IB_SUCCESS ) + { +done: + cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_send_rep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_rep( + p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_rtr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_rtr_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_rtr_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rtr_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_get_rtr_ioctl_t); + + p_ioctl->status = al_cep_get_rtr_attr( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rtr ); + + if( p_ioctl->status != IB_SUCCESS ) + cl_memclr( &p_ioctl->rtr, sizeof(ib_qp_mod_t) ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_rts( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_rts_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_rts_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rts_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_get_rts_ioctl_t); + + p_ioctl->status = al_cep_get_rts_attr( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rts ); + + if( p_ioctl->status != IB_SUCCESS ) + cl_memclr( &p_ioctl->rts, sizeof(ib_qp_mod_t) ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_rtu( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rtu_ioctl_t *p_ioctl; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rtu_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rtu_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + status = al_cep_rtu( p_context->h_al, + p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_rej( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rej_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rej_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rej_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_rej( + p_context->h_al, p_ioctl->cid, p_ioctl->rej_status, p_ioctl->ari, + p_ioctl->ari_len, p_ioctl->pdata, p_ioctl->pdata_len ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_mra( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_mra_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_mra_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. 
+	if( p_ioctl->cm_lap.p_lap_pdata )
+		p_ioctl->cm_lap.p_lap_pdata = p_ioctl->pdata;
*/ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->cm_lap.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->cm_lap.h_qp = h_qp; + + status = al_cep_lap( p_context->h_al, p_ioctl->cid, &p_ioctl->cm_lap ); + + deref_al_obj( &h_qp->obj ); + +done: + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_apr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_apr_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_apr_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_apr_ioctl_out); + + if( p_ioctl->in.cm_apr.p_info ) + p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_ioctl->in.apr_info; + if( p_ioctl->in.cm_apr.p_apr_pdata ) + p_ioctl->in.cm_apr.p_apr_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. 
+	(*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_apr(
+		p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) );
+
+	*p_ret_bytes = sizeof(ib_api_status_t);
*/ + status = al_cep_dreq( p_context->h_al, + p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_drep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_drep_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_drep_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_drep_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_ioctl->cm_drep.p_drep_pdata = p_ioctl->pdata; + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_drep( + p_context->h_al, p_ioctl->cid, &p_ioctl->cm_drep ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_timewait( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_timewait_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_timewait_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_timewait_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_ioctl->status = al_cep_get_timewait( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->timewait_us ); + + *p_ret_bytes = sizeof(ual_cep_get_timewait_ioctl_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_poll( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_poll_ioctl_t *p_ioctl; + ib_mad_element_t *p_mad = NULL; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_poll_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_poll_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_poll_ioctl_t); + + p_ioctl->status = al_cep_poll( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->new_cep, &p_mad ); + + if( p_ioctl->status == IB_SUCCESS ) + { + /* Copy the MAD for user consumption and free the it. 
*/ + CL_ASSERT( p_mad ); + p_ioctl->element = *p_mad; + if( p_mad->grh_valid ) + p_ioctl->grh = *p_mad->p_grh; + else + cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) ); + cl_memcpy( p_ioctl->mad_buf, p_mad->p_mad_buf, MAD_BLOCK_SIZE ); + ib_put_mad( p_mad ); + } + else + { + cl_memclr( &p_ioctl->mad_buf, sizeof(MAD_BLOCK_SIZE) ); + cl_memclr( &p_ioctl->new_cep, sizeof(ib_cep_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_event( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + al_dev_open_context_t *p_context; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = p_open_context; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Invalid file object type for request: %d\n", + p_io_stack->FileObject->FsContext2) ); + return CL_INVALID_PARAMETER; + } + + /* Check the size of the ioctl */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid IOCTL input buffer.\n") ); + return CL_INVALID_PARAMETER; + } + + cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl ); + + status = al_cep_queue_irp( p_context->h_al, cid, h_ioctl ); + if( status != STATUS_PENDING ) + { + /* Invalid CID. Complete the request. 
*/ + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + AL_EXIT( AL_DBG_CM ); + return CL_PENDING; +} + + +cl_status_t cep_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( h_ioctl && p_ret_bytes ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_CREATE_CEP: + cl_status = proxy_create_cep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_CEP: + cl_status = proxy_destroy_cep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_LISTEN: + cl_status = proxy_cep_listen( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_REQ: + cl_status = proxy_cep_pre_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_REQ: + cl_status = proxy_cep_send_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_REP: + cl_status = proxy_cep_pre_rep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_REP: + cl_status = proxy_cep_send_rep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_RTR: + cl_status = proxy_cep_get_rtr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_RTS: + cl_status = proxy_cep_get_rts( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_RTU: + cl_status = proxy_cep_rtu( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_REJ: + cl_status = proxy_cep_rej( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_MRA: + cl_status = proxy_cep_mra( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_LAP: + cl_status = proxy_cep_lap( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_APR: + cl_status = proxy_cep_pre_apr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_APR: + 
cl_status = proxy_cep_send_apr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_DREQ: + cl_status = proxy_cep_dreq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_DREP: + cl_status = proxy_cep_drep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_TIMEWAIT: + cl_status = proxy_cep_get_timewait( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_EVENT: + cl_status = proxy_cep_get_event( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_POLL: + cl_status = proxy_cep_poll( p_context, h_ioctl, p_ret_bytes ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/trunk/core/al/kernel/al_proxy_cm.c b/trunk/core/al/kernel/al_proxy_cm.c index fa61cd8b..075c2d93 100644 --- a/trunk/core/al/kernel/al_proxy_cm.c +++ b/trunk/core/al/kernel/al_proxy_cm.c @@ -37,7 +37,7 @@ #include "al.h" #include "al_debug.h" #include "al_dev.h" -#include "al_cm.h" +#include "al_cm_cep.h" #include "al_qp.h" #include "al_proxy.h" diff --git a/trunk/core/al/kernel/al_proxy_subnet.c b/trunk/core/al/kernel/al_proxy_subnet.c index 94b4e689..73eddb1a 100644 --- a/trunk/core/al/kernel/al_proxy_subnet.c +++ b/trunk/core/al/kernel/al_proxy_subnet.c @@ -214,7 +214,12 @@ proxy_send_sa_req( p_context = p_open_context; p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); - if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC ) + /* + * We support SA requests coming in either through the main file object + * or the async file handle. + */ + if( p_io_stack->FileObject->FsContext2 && + (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC ) { AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid file object type for request: %d\n", @@ -279,9 +284,14 @@ proxy_send_sa_req( /* Synchronize with callbacks. 
*/ cl_spinlock_acquire( &p_context->h_al->obj.lock ); + /* + * We never pass the user-mode flag when sending SA requests - the + * I/O manager will perform all synchronization to make this IRP sync + * if it needs to. + */ ib_status = al_send_sa_req( p_sa_req, p_ioctl->in.port_guid, p_ioctl->in.timeout_ms, p_ioctl->in.retry_cnt, - &p_ioctl->in.sa_req ); + &p_ioctl->in.sa_req, 0 ); if( ib_status == IB_SUCCESS ) { /* Hold a reference on the proxy context until the request completes. */ diff --git a/trunk/core/al/kernel/al_sa_req.c b/trunk/core/al/kernel/al_sa_req.c index b4ed77b4..6671f5ba 100644 --- a/trunk/core/al/kernel/al_sa_req.c +++ b/trunk/core/al/kernel/al_sa_req.c @@ -588,16 +588,35 @@ al_send_sa_req( IN const net64_t port_guid, IN const uint32_t timeout_ms, IN const uint32_t retry_cnt, - IN const ib_user_query_t* const p_sa_req_data ) + IN const ib_user_query_t* const p_sa_req_data, + IN const ib_al_flags_t flags ) { ib_api_status_t status; sa_req_svc_t *p_sa_req_svc; ib_mad_element_t *p_mad_request; ib_mad_t *p_mad_hdr; ib_sa_mad_t *p_sa_mad; + KEVENT event; CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl ); - + + if( flags & IB_FLAGS_SYNC ) + { + if( !cl_is_blockable() ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Thread context not blockable\n") ); + return IB_INVALID_SETTING; + } + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + p_sa_req->p_sync_event = &event; + } + else + { + p_sa_req->p_sync_event = NULL; + } + /* Locate the sa_req service to issue the sa_req on. */ p_sa_req->p_sa_req_svc = acquire_sa_req_svc( port_guid ); if( !p_sa_req->p_sa_req_svc ) @@ -662,6 +681,11 @@ al_send_sa_req( ib_put_mad( p_mad_request ); deref_al_obj( &p_sa_req->p_sa_req_svc->obj ); } + else if( flags & IB_FLAGS_SYNC ) + { + /* Wait for the MAD completion. 
*/ + KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL ); + } CL_EXIT( AL_DBG_SA_REQ, g_al_dbg_lvl ); return status; @@ -680,6 +704,7 @@ sa_req_send_comp_cb( { al_sa_req_t *p_sa_req; sa_req_svc_t *p_sa_req_svc; + KEVENT *p_sync_event; CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl ); @@ -698,9 +723,12 @@ sa_req_send_comp_cb( p_sa_req = p_request_mad->send_context1; p_sa_req_svc = p_sa_req->p_sa_req_svc; + p_sync_event = p_sa_req->p_sync_event; p_sa_req->status = convert_wc_status( p_request_mad->status ); p_sa_req->pfn_sa_req_cb( p_sa_req, NULL ); + if( p_sync_event ) + KeSetEvent( p_sync_event, 0, FALSE ); deref_al_obj( &p_sa_req_svc->obj ); } @@ -724,6 +752,7 @@ sa_req_recv_comp_cb( al_sa_req_t *p_sa_req; sa_req_svc_t *p_sa_req_svc; ib_sa_mad_t *p_sa_mad; + KEVENT *p_sync_event; CL_ENTER( AL_DBG_SA_REQ, g_al_dbg_lvl ); @@ -732,6 +761,7 @@ sa_req_recv_comp_cb( p_sa_req = p_mad_response->send_context1; p_sa_req_svc = p_sa_req->p_sa_req_svc; + p_sync_event = p_sa_req->p_sync_event; //*** check for SA redirection... @@ -745,6 +775,8 @@ sa_req_recv_comp_cb( /* Notify the requestor of the result. 
*/ CL_TRACE( AL_DBG_SA_REQ, g_al_dbg_lvl, ("notifying user\n") ); p_sa_req->pfn_sa_req_cb( p_sa_req, p_mad_response ); + if( p_sync_event ) + KeSetEvent( p_sync_event, 0, FALSE ); deref_al_obj( &p_sa_req_svc->obj ); CL_EXIT( AL_DBG_SA_REQ, g_al_dbg_lvl ); diff --git a/trunk/core/al/user/SOURCES b/trunk/core/al/user/SOURCES index e2be3afe..ab6f49dd 100644 --- a/trunk/core/al/user/SOURCES +++ b/trunk/core/al/user/SOURCES @@ -16,7 +16,7 @@ SOURCES=\ ual_av.c \ ual_ca.c \ ual_ci_ca.c \ - ual_cm.c \ + ual_cm_cep.c \ ual_cq.c \ ual_dm.c \ ual_mad.c \ @@ -36,7 +36,7 @@ SOURCES=\ ..\al_av.c \ ..\al_ca.c \ ..\al_ci_ca_shared.c \ - ..\al_cm_shared.c \ + ..\al_cm_qp.c \ ..\al_common.c \ ..\al_cq.c \ ..\al_dm.c \ diff --git a/trunk/core/al/user/al_dll.c b/trunk/core/al/user/al_dll.c index a801c024..734c52e5 100644 --- a/trunk/core/al/user/al_dll.c +++ b/trunk/core/al/user/al_dll.c @@ -148,10 +148,11 @@ do_al_dev_ioctl( if( cl_status != CL_SUCCESS ) { + CL_ASSERT( cl_status != CL_PENDING ); AL_TRACE( AL_DBG_ERROR, ("Error performing IOCTL 0x%08x to AL driver (%s)\n", command, CL_STATUS_MSG(cl_status)) ); - return IB_ERROR; + return CL_ERROR; } AL_EXIT( AL_DBG_DEV ); diff --git a/trunk/core/al/user/ual_cm_cep.c b/trunk/core/al/user/ual_cm_cep.c new file mode 100644 index 00000000..355acd40 --- /dev/null +++ b/trunk/core/al/user/ual_cm_cep.c @@ -0,0 +1,1407 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include "al_common.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_debug.h" +#include "ib_common.h" +#include "al_mgr.h" +//#include "al_ca.h" +#include "al.h" +//#include "al_mad.h" +#include "al_qp.h" + + +#define UAL_CEP_MIN (512) +#define UAL_CEP_GROW (256) + + +/* Global connection manager object. */ +typedef struct _ual_cep_mgr +{ + al_obj_t obj; + + cl_ptr_vector_t cep_vector; + + /* File handle on which to issue query IOCTLs. */ + HANDLE h_file; + +} ual_cep_mgr_t; + + +typedef struct _al_ucep +{ + ib_cep_t cep; + al_pfn_cep_cb_t pfn_cb; + ib_al_handle_t h_al; + cl_list_item_t al_item; + + ib_pfn_destroy_cb_t pfn_destroy_cb; + + OVERLAPPED ov; + atomic32_t ref_cnt; + +} ucep_t; + + +/* Global instance of the CM agent. */ +ual_cep_mgr_t *gp_cep_mgr = NULL; + + +/* + * Frees the global CEP manager. Invoked during al_obj destruction. 
+ */ +static void +__free_cep_mgr( + IN al_obj_t* p_obj ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( &gp_cep_mgr->obj == p_obj ); + + if( gp_cep_mgr->h_file != INVALID_HANDLE_VALUE ) + CloseHandle( gp_cep_mgr->h_file ); + + cl_ptr_vector_destroy( &gp_cep_mgr->cep_vector ); + + destroy_al_obj( p_obj ); + + cl_free( gp_cep_mgr ); + gp_cep_mgr = NULL; + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Allocates and initialized the global user-mode CM agent. + */ +ib_api_status_t +create_cep_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( gp_cep_mgr == NULL ); + + /* Allocate the global CM agent. */ + gp_cep_mgr = (ual_cep_mgr_t*)cl_zalloc( sizeof(ual_cep_mgr_t) ); + if( !gp_cep_mgr ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Failed allocation of global CEP manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM ); + cl_ptr_vector_construct( &gp_cep_mgr->cep_vector ); + gp_cep_mgr->h_file = INVALID_HANDLE_VALUE; + + status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE, + NULL, NULL, __free_cep_mgr ); + if( status != IB_SUCCESS ) + { + __free_cep_mgr( &gp_cep_mgr->obj ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + /* Attach to the parent object. 
*/ + status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + cl_status = cl_ptr_vector_init( + &gp_cep_mgr->cep_vector, UAL_CEP_MIN, UAL_CEP_GROW ); + if( cl_status != CL_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("cl_vector_init failed with status %s.\n", + CL_STATUS_MSG(cl_status)) ); + return ib_convert_cl_status( cl_status ); + } + + /* Create a file object on which to issue all CM requests. */ + gp_cep_mgr->h_file = ual_create_async_file( UAL_BIND_CM ); + if( gp_cep_mgr->h_file == INVALID_HANDLE_VALUE ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ual_create_async_file for UAL_BIND_CM returned %d.\n", + GetLastError()) ); + return IB_ERROR; + } + + /* Release the reference from init_al_obj */ + deref_al_obj( &gp_cep_mgr->obj ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +void +al_cep_cleanup_al( + IN const ib_al_handle_t h_al ) +{ + cl_list_item_t *p_item; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + /* Destroy all CEPs associated with the input instance of AL. */ + cl_spinlock_acquire( &h_al->obj.lock ); + for( p_item = cl_qlist_head( &h_al->cep_list ); + p_item != cl_qlist_end( &h_al->cep_list ); + p_item = cl_qlist_head( &h_al->cep_list ) ) + { + /* + * Note that we don't walk the list - we can't hold the AL + * lock when cleaning up its CEPs because the cleanup path + * takes the CEP's lock. We always want to take the CEP + * before the AL lock to prevent any possibilities of deadlock. + * + * So we just get the CID, and then release the AL lock and try to + * destroy. This should unbind the CEP from the AL instance and + * remove it from the list, allowing the next CEP to be cleaned up + * in the next pass through. 
+ */ + cid = PARENT_STRUCT( p_item, ucep_t, al_item )->cep.cid; + cl_spinlock_release( &h_al->obj.lock ); + al_destroy_cep( h_al, cid, NULL ); + cl_spinlock_acquire( &h_al->obj.lock ); + } + cl_spinlock_release( &h_al->obj.lock ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__destroy_ucep( + IN ucep_t* const p_cep ) +{ + if( p_cep->pfn_destroy_cb ) + p_cep->pfn_destroy_cb( p_cep->cep.context ); + cl_free( p_cep ); +} + + +ib_api_status_t +__create_ucep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context, + OUT net32_t* const p_cid ) +{ + ucep_t *p_cep; + DWORD bytes_ret; + ual_create_cep_ioctl_t ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_cep = cl_zalloc( sizeof(ucep_t) ); + if( !p_cep ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, ("Failed to allocate ucep_t\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Initialize to two - one for the CEP, and one for the IOCTL. */ + p_cep->ref_cnt = 2; + + /* Store user parameters. */ + p_cep->pfn_cb = pfn_cb; + p_cep->cep.context = context; + + /* Create a kernel CEP only if we don't already have a CID. */ + if( cid == AL_INVALID_CID ) + { + if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, NULL, 0, + &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + __destroy_ucep( p_cep ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CREATE_CEP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status != IB_SUCCESS ) + { + __destroy_ucep( p_cep ); + AL_TRACE_EXIT( AL_DBG_ERROR, ("UAL_CREATE_CEP IOCTL returned %s\n", + ib_get_err_str( ioctl.status )) ); + return ioctl.status; + } + + p_cep->cep.cid = ioctl.cid; + } + else + { + p_cep->cep.cid = cid; + } + + /* Track the CEP before we issue any further IOCTLs on it. 
*/ + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + cl_ptr_vector_set_min_size( &gp_cep_mgr->cep_vector, p_cep->cep.cid + 1 ); + CL_ASSERT( !cl_ptr_vector_get( &gp_cep_mgr->cep_vector, p_cep->cep.cid ) ); + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, p_cep ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + /* Now issue a poll request. This request is async. */ + if( DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT, + &p_cep->cep.cid, sizeof(p_cep->cep.cid), + NULL, 0, NULL, &p_cep->ov ) || + GetLastError() != ERROR_IO_PENDING ) + { + AL_TRACE( AL_DBG_ERROR, ("Failed to issue CEP poll IOCTL.\n") ); + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cep.cid, NULL ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid, + sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL ); + + __destroy_ucep( p_cep ); + AL_EXIT( AL_DBG_CM ); + return IB_ERROR; + } + + p_cep->h_al = h_al; + + /* Track the CEP in its owning AL instance. */ + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &h_al->obj.lock ); + + if( p_cid ) + *p_cid = p_cep->cep.cid; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void *context, + OUT net32_t* const p_cid ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + status = __create_ucep( h_al, AL_INVALID_CID, pfn_cb, context, p_cid ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +/* + * Note that destroy_cep is synchronous. It does however handle the case + * where a user calls it from a callback context. 
+ */ +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + ucep_t *p_cep; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + if( cid < cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) ) + { + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + if( p_cep && p_cep->h_al == h_al ) + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, cid, NULL ); + else + p_cep = NULL; + } + else + { + p_cep = NULL; + } + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + if( !p_cep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + p_cep->pfn_destroy_cb = pfn_destroy_cb; + + /* + * Remove from the AL instance. Note that once removed, all + * callbacks for an item will stop. + */ + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_remove_item( &h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &h_al->obj.lock ); + + /* Destroy the kernel CEP right away. */ + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cep.cid, + sizeof(p_cep->cep.cid), NULL, 0, &bytes_ret, NULL ); + + if( !cl_atomic_dec( &p_cep->ref_cnt ) ) + { + /* We have no remaining refrences. 
*/ + __destroy_ucep( p_cep ); + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ) +{ + ual_cep_listen_ioctl_t ioctl; + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_listen_info ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + ioctl.cid = cid; + ioctl.cep_listen = *p_listen_info; + if( p_listen_info->p_cmp_buf ) + { + if( p_listen_info->cmp_len > IB_REQ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("Listen compare data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.compare, p_listen_info->p_cmp_buf, + p_listen_info->cmp_len ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_LISTEN, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ual_cep_listen IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_req( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_req_t* const p_cm_req, + OUT ib_qp_mod_t* const p_init ) +{ + ual_cep_req_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_req ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + if( !p_init ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + ioctl.in.cid = cid; + ioctl.in.cm_req = *p_cm_req; + ioctl.in.cm_req.h_qp = (ib_qp_handle_t)p_cm_req->h_qp->obj.hdl; + ioctl.in.paths[0] = *(p_cm_req->p_primary_path); + if( p_cm_req->p_alt_path ) + ioctl.in.paths[1] = *(p_cm_req->p_alt_path); + /* Copy private data, if any. 
*/ + if( p_cm_req->p_req_pdata ) + { + if( p_cm_req->req_length > IB_REQ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.pdata, p_cm_req->p_req_pdata, + p_cm_req->req_length ); + } + + /* Copy compare data, if any. */ + if( p_cm_req->p_compare_buffer ) + { + if( p_cm_req->compare_length > IB_REQ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("REQ compare data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.compare, p_cm_req->p_compare_buffer, + p_cm_req->compare_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REQ, &ioctl, + sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_init = ioctl.out.init; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REQ, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_SEND_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void *context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ) +{ + ucep_t *p_cep; + ual_cep_rep_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_rep ) + { + 
AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + if( !p_init ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + /* Store the context for the CEP. */ + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + if( !p_cep ) + { + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + p_cep->cep.context = context; + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + ioctl.in.cid = cid; + ioctl.in.cm_rep = *p_cm_rep; + ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)p_cm_rep->h_qp->obj.hdl; + /* Copy private data, if any. */ + if( p_cm_rep->p_rep_pdata ) + { + if( p_cm_rep->rep_length > IB_REP_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than REP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.pdata, p_cm_rep->p_rep_pdata, + p_cm_rep->rep_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REP, &ioctl, + sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_init = ioctl.out.init; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REP, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_SEND_REP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN 
net32_t cid, + OUT ib_qp_mod_t* const p_rtr ) +{ + ual_cep_get_rtr_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_rtr ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTR, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_GET_RTR IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_rtr = ioctl.rtr; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ) +{ + ual_cep_get_rts_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_rts ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTS, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_GET_RTS IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_rts = ioctl.rts; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_rtu_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + /* Copy private data, if any. 
*/ + if( p_pdata ) + { + if( pdata_len > IB_RTU_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than RTU private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + } + ioctl.pdata_len = pdata_len; + + if( !DeviceIoControl( g_al_device, UAL_CEP_RTU, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_RTU IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_rej_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.rej_status = rej_status; + if( p_ari ) + { + if( ari_len > IB_ARI_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than REJ ARI data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.ari, p_ari, ari_len ); + ioctl.ari_len = ari_len; + } + else + { + ioctl.ari_len = 0; + } + /* Copy private data, if any. 
*/ + if( p_pdata) + { + if( pdata_len > IB_REJ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than REJ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + ioctl.pdata_len = pdata_len; + } + else + { + ioctl.pdata_len = 0; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_REJ, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + ual_cep_mra_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_mra ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_mra = *p_cm_mra; + /* Copy private data, if any. 
*/ + if( p_cm_mra->p_mra_pdata ) + { + if( p_cm_mra->mra_length > IB_MRA_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than MRA private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_mra->p_mra_pdata, p_cm_mra->mra_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_MRA, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_MRA IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + ual_cep_lap_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_lap ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_lap->p_alt_path ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_lap = *p_cm_lap; + ioctl.cm_lap.h_qp = (ib_qp_handle_t)p_cm_lap->h_qp->obj.hdl; + ioctl.alt_path = *(p_cm_lap->p_alt_path); + /* Copy private data, if any. 
*/ + if( p_cm_lap->p_lap_pdata ) + { + if( p_cm_lap->lap_length > IB_LAP_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than LAP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_lap->p_lap_pdata, p_cm_lap->lap_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_LAP, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_LAP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ) +{ + ual_cep_apr_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_apr || !p_apr ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + ioctl.in.cid = cid; + ioctl.in.cm_apr = *p_cm_apr; + ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)p_cm_apr->h_qp->obj.hdl; + if( p_cm_apr->p_info ) + { + if( p_cm_apr->info_length > IB_APR_INFO_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than APR info data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.in.apr_info, p_cm_apr->p_info, p_cm_apr->info_length ); + } + /* Copy private data, if any. 
*/ + if( p_cm_apr->p_apr_pdata ) + { + if( p_cm_apr->apr_length > IB_REJ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than APR private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.in.pdata, p_cm_apr->p_apr_pdata, p_cm_apr->apr_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_REJ, &ioctl.in, + sizeof(ioctl.in), &ioctl.out, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_apr = ioctl.out.apr; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_apr( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_APR, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_SEND_APR IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata OPTIONAL, + IN const uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_dreq_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + /* Copy private data, if any. 
*/ + if( p_pdata ) + { + if( pdata_len > IB_DREQ_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than DREQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + ioctl.pdata_len = pdata_len; + } + else + { + ioctl.pdata_len = 0; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_DREQ, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_DREQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + ual_cep_drep_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_drep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_drep = *p_cm_drep; + /* Copy private data, if any. 
*/ + if( p_cm_drep->p_drep_pdata ) + { + if( p_cm_drep->drep_length > IB_DREP_PDATA_SIZE ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("private data larger than DREP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_drep->p_drep_pdata, p_cm_drep->drep_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_DREP, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_DREP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ) +{ + ual_cep_get_timewait_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_timewait_us ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_TIMEWAIT, &cid, sizeof(cid), + &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_DREP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_timewait_us = ioctl.timewait_us; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} +// +// +//ib_api_status_t +//al_cep_migrate( +// IN ib_al_handle_t h_al, +// IN net32_t cid ); +// +// +//ib_api_status_t +//al_cep_established( +// IN ib_al_handle_t h_al, +// IN net32_t cid ); + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN OUT ib_cep_t* const p_new_cep, + OUT ib_mad_element_t** const pp_mad ) +{ + ucep_t *p_cep; + ib_api_status_t status; + ual_cep_poll_ioctl_t ioctl; + DWORD bytes_ret; + ib_mad_element_t *p_mad; + ib_grh_t *p_grh; + ib_mad_t *p_mad_buf; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return 
IB_INVALID_HANDLE; + } + + if( !p_new_cep || !pp_mad ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + if( cid > cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) ) + p_cep = NULL; + else + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + if( !p_cep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + status = ib_get_mad( g_pool_key, MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_TRACE_EXIT( AL_DBG_ERROR, + ("ib_get_mad returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + p_mad_buf = p_mad->p_mad_buf; + p_grh = p_mad->p_grh; + + if( !DeviceIoControl( g_al_device, UAL_CEP_POLL, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + ib_put_mad( p_mad ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("UAL_CEP_GET_RTS IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + { + if( ioctl.new_cep.cid != AL_INVALID_CID ) + { + /* Need to create a new CEP for user-mode. */ + status = __create_ucep( p_cep->h_al, ioctl.new_cep.cid, + p_cep->pfn_cb, ioctl.new_cep.context, NULL ); + if( status != IB_SUCCESS ) + { + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, + &ioctl.new_cep.cid, sizeof(ioctl.new_cep.cid), + NULL, 0, &bytes_ret, NULL ); + goto err; + } + } + + /* Copy the MAD payload as it's all that's used. 
*/ + *p_mad = ioctl.element; + p_mad->p_grh = p_grh; + if( p_mad->grh_valid ) + cl_memcpy( p_mad->p_grh, &ioctl.grh, sizeof(ib_grh_t) ); + p_mad->p_mad_buf = p_mad_buf; + + cl_memcpy( p_mad->p_mad_buf, ioctl.mad_buf, MAD_BLOCK_SIZE ); + + *p_new_cep = ioctl.new_cep; + *pp_mad = p_mad; + } + else + { +err: + ib_put_mad( p_mad ); + } + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +/* Callback to process CM events */ +void +cm_cb( + IN DWORD error_code, + IN DWORD ret_bytes, + IN LPOVERLAPPED p_ov ) +{ + ucep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + /* The UAL_CEP_GET_EVENT IOCTL does not have any output data. */ + UNUSED_PARAM( ret_bytes ); + + p_cep = PARENT_STRUCT( p_ov, ucep_t, ov ); + + if( !error_code ) + { + p_cep->pfn_cb( p_cep->h_al, &p_cep->cep ); + + if( !DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT, + &p_cep->cep.cid, sizeof(p_cep->cep.cid), NULL, 0, + NULL, &p_cep->ov ) && GetLastError() == ERROR_IO_PENDING ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + else if( GetLastError() != ERROR_INVALID_PARAMETER ) + { + /* We can get ERROR_INVALID_PARAMETER if the CEP was destroyed. */ + AL_TRACE( AL_DBG_ERROR, + ("DeviceIoControl for CEP callback request returned %d.\n", + GetLastError()) ); + } + } + else + { + AL_TRACE( AL_DBG_WARN, + ("UAL_CEP_GET_EVENT IOCTL returned %d.\n", error_code) ); + } + + /* + * We failed to issue the next request or the previous request was + * cancelled. Release the reference held by the previous IOCTL and exit. 
+ */ + if( !cl_atomic_dec( &p_cep->ref_cnt ) ) + __destroy_ucep( p_cep ); + + AL_EXIT( AL_DBG_CM ); +} diff --git a/trunk/core/al/user/ual_mgr.c b/trunk/core/al/user/ual_mgr.c index ccee3ec5..48511ed5 100644 --- a/trunk/core/al/user/ual_mgr.c +++ b/trunk/core/al/user/ual_mgr.c @@ -44,9 +44,9 @@ #include "al_cq.h" #include "ual_ca.h" #include "ual_qp.h" -#include "ual_cm.h" #include "ual_mad.h" #include "ib_common.h" +#include "al_cm_cep.h" /* Global AL manager handle is defined in al_mgr_shared.c */ @@ -66,9 +66,9 @@ static DWORD WINAPI __cb_thread_routine( IN void *context ); -static void -__process_cm_cb( - IN cm_cb_ioctl_info_t* p_cm_cb_info); +//static void +//__process_cm_cb( +// IN cm_cb_ioctl_info_t* p_cm_cb_info); static void __process_misc_cb( @@ -87,7 +87,7 @@ __cleanup_ual_mgr( gp_al_mgr->ual_mgr.exit_thread = TRUE; /* Closing the file handles cancels any pending I/O requests. */ - CloseHandle( gp_al_mgr->ual_mgr.h_cm_file ); + //CloseHandle( gp_al_mgr->ual_mgr.h_cm_file ); CloseHandle( gp_al_mgr->ual_mgr.h_cq_file ); CloseHandle( gp_al_mgr->ual_mgr.h_misc_file ); CloseHandle( g_al_device ); @@ -314,15 +314,15 @@ create_al_mgr() } /* Create CM callback file handle. */ - gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM ); - if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE ) - { - gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); - AL_TRACE_EXIT( AL_DBG_ERROR, - ("ual_create_async_file for UAL_BIND_CM returned %d.\n", - GetLastError()) ); - return IB_ERROR; - } + //gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM ); + //if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE ) + //{ + // gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + // AL_TRACE_EXIT( AL_DBG_ERROR, + // ("ual_create_async_file for UAL_BIND_CM returned %d.\n", + // GetLastError()) ); + // return IB_ERROR; + //} /* Create the CQ completion callback file handle. 
*/ gp_al_mgr->ual_mgr.h_cq_file = ual_create_async_file( UAL_BIND_CQ ); @@ -425,6 +425,16 @@ create_al_mgr() return ib_status; } + /* Initialize CM */ + ib_status = create_cep_mgr( &gp_al_mgr->obj ); + if( ib_status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_TRACE_EXIT( AL_DBG_ERROR, + ("create_cm_mgr failed, status = 0x%x.\n", ib_status) ); + return ib_status; + } + cl_status = cl_event_init( &gp_al_mgr->ual_mgr.sync_event, FALSE ); if( cl_status != CL_SUCCESS ) { @@ -448,20 +458,20 @@ create_al_mgr() } } - if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO, - NULL, 0, - &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t), - NULL, &gp_al_mgr->ual_mgr.cm_ov ) ) - { - if( GetLastError() != ERROR_IO_PENDING ) - { - AL_TRACE_EXIT( AL_DBG_ERROR, - ("DeviceIoControl for CM callback request returned %d.\n", - GetLastError()) ); - gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL); - return IB_ERROR; - } - } + //if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO, + // NULL, 0, + // &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t), + // NULL, &gp_al_mgr->ual_mgr.cm_ov ) ) + //{ + // if( GetLastError() != ERROR_IO_PENDING ) + // { + // AL_TRACE_EXIT( AL_DBG_ERROR, + // ("DeviceIoControl for CM callback request returned %d.\n", + // GetLastError()) ); + // gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL); + // return IB_ERROR; + // } + //} if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO, NULL, 0, @@ -509,172 +519,172 @@ create_al_mgr() /* * UAL thread start routines. 
*/ - - -/* Thread to process the asynchronous CM notifications */ -void -cm_cb( - IN DWORD error_code, - IN DWORD ret_bytes, - IN LPOVERLAPPED p_ov ) -{ - AL_ENTER( AL_DBG_CM ); - - UNUSED_PARAM( p_ov ); - - if( !error_code && ret_bytes ) - { - /* Check the record type and adjust the pointers */ - /* TBD */ - __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info ); - } - - if( error_code != ERROR_OPERATION_ABORTED ) - { - if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO, - NULL, 0, - &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t), - NULL, &gp_al_mgr->ual_mgr.cm_ov ) ) - { - if( GetLastError() != ERROR_IO_PENDING ) - { - AL_TRACE_EXIT( AL_DBG_ERROR, - ("DeviceIoControl for CM callback request returned %d.\n", - GetLastError()) ); - } - } - } - - AL_EXIT( AL_DBG_CM ); -} - - - -static void -__process_cm_cb( - IN cm_cb_ioctl_info_t* p_cm_cb_info) -{ - switch( p_cm_cb_info->rec_type) - { - case CM_REQ_REC: - { - struct _cm_req_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec; - - if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) - { - p_ioctl_rec->req_rec.p_req_pdata = - (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata; - } - else - { - p_ioctl_rec->req_rec.p_req_pdata = - (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata; - } - ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr, - &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms ); - break; - } - case CM_REP_REC: - { - struct _cm_rep_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec; - - if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) - { - p_ioctl_rec->rep_rec.p_rep_pdata = - (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata; - } - else - { - p_ioctl_rec->rep_rec.p_rep_pdata = - (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata; - } - ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr, - &p_ioctl_rec->qp_mod_rts ); - break; - } - case CM_RTU_REC: - { - struct 
_cm_rtu_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec; - - p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata; - ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec ); - break; - } - case CM_REJ_REC: - { - struct _cm_rej_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec; - - p_ioctl_rec->rej_rec.p_rej_pdata = - (uint8_t*)&p_ioctl_rec->rej_pdata; - p_ioctl_rec->rej_rec.p_ari = - (uint8_t*)&p_ioctl_rec->ari_pdata; - ual_cm_rej_cb( &p_ioctl_rec->rej_rec ); - break; - } - case CM_MRA_REC: - { - struct _cm_mra_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec; - - p_ioctl_rec->mra_rec.p_mra_pdata = - (uint8_t*)&p_ioctl_rec->mra_pdata; - ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec ); - break; - } - case CM_LAP_REC: - { - struct _cm_lap_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec; - - p_ioctl_rec->lap_rec.p_lap_pdata = - (uint8_t *)&p_ioctl_rec->lap_pdata; - ual_cm_lap_cb( &p_ioctl_rec->lap_rec ); - break; - } - case CM_APR_REC: - { - struct _cm_apr_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec; - - p_ioctl_rec->apr_rec.p_apr_pdata = - (uint8_t*)&p_ioctl_rec->apr_pdata; - p_ioctl_rec->apr_rec.p_info = - (uint8_t*)&p_ioctl_rec->apr_info; - ual_cm_apr_cb( &p_ioctl_rec->apr_rec ); - break; - } - case CM_DREQ_REC: - { - struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec; - - p_ioctl_rec->dreq_rec.p_dreq_pdata = - (uint8_t*)&p_ioctl_rec->dreq_pdata; - ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec ); - break; - } - case CM_DREP_REC: - { - struct _cm_drep_cb_ioctl_rec *p_ioctl_rec = - &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec; - - p_ioctl_rec->drep_rec.p_drep_pdata = - (uint8_t*)&p_ioctl_rec->drep_pdata; - ual_cm_drep_cb( &p_ioctl_rec->drep_rec ); - break; - } - default: - /* Unknown record type - just return */ - break; - } -} - - - +// +// +///* Thread to process the 
asynchronous CM notifications */ +//void +//cm_cb( +// IN DWORD error_code, +// IN DWORD ret_bytes, +// IN LPOVERLAPPED p_ov ) +//{ +// AL_ENTER( AL_DBG_CM ); +// +// UNUSED_PARAM( p_ov ); +// +// if( !error_code && ret_bytes ) +// { +// /* Check the record type and adjust the pointers */ +// /* TBD */ +// __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info ); +// } +// +// if( error_code != ERROR_OPERATION_ABORTED ) +// { +// if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO, +// NULL, 0, +// &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t), +// NULL, &gp_al_mgr->ual_mgr.cm_ov ) ) +// { +// if( GetLastError() != ERROR_IO_PENDING ) +// { +// AL_TRACE_EXIT( AL_DBG_ERROR, +// ("DeviceIoControl for CM callback request returned %d.\n", +// GetLastError()) ); +// } +// } +// } +// +// AL_EXIT( AL_DBG_CM ); +//} + + + +//static void +//__process_cm_cb( +// IN cm_cb_ioctl_info_t* p_cm_cb_info) +//{ +// switch( p_cm_cb_info->rec_type) +// { +// case CM_REQ_REC: +// { +// struct _cm_req_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec; +// +// if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) +// { +// p_ioctl_rec->req_rec.p_req_pdata = +// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata; +// } +// else +// { +// p_ioctl_rec->req_rec.p_req_pdata = +// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata; +// } +// ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr, +// &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms ); +// break; +// } +// case CM_REP_REC: +// { +// struct _cm_rep_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec; +// +// if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) +// { +// p_ioctl_rec->rep_rec.p_rep_pdata = +// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata; +// } +// else +// { +// p_ioctl_rec->rep_rec.p_rep_pdata = +// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata; +// } +// ual_cm_rep_cb( 
&p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr, +// &p_ioctl_rec->qp_mod_rts ); +// break; +// } +// case CM_RTU_REC: +// { +// struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec; +// +// p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata; +// ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec ); +// break; +// } +// case CM_REJ_REC: +// { +// struct _cm_rej_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec; +// +// p_ioctl_rec->rej_rec.p_rej_pdata = +// (uint8_t*)&p_ioctl_rec->rej_pdata; +// p_ioctl_rec->rej_rec.p_ari = +// (uint8_t*)&p_ioctl_rec->ari_pdata; +// ual_cm_rej_cb( &p_ioctl_rec->rej_rec ); +// break; +// } +// case CM_MRA_REC: +// { +// struct _cm_mra_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec; +// +// p_ioctl_rec->mra_rec.p_mra_pdata = +// (uint8_t*)&p_ioctl_rec->mra_pdata; +// ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec ); +// break; +// } +// case CM_LAP_REC: +// { +// struct _cm_lap_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec; +// +// p_ioctl_rec->lap_rec.p_lap_pdata = +// (uint8_t *)&p_ioctl_rec->lap_pdata; +// ual_cm_lap_cb( &p_ioctl_rec->lap_rec ); +// break; +// } +// case CM_APR_REC: +// { +// struct _cm_apr_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec; +// +// p_ioctl_rec->apr_rec.p_apr_pdata = +// (uint8_t*)&p_ioctl_rec->apr_pdata; +// p_ioctl_rec->apr_rec.p_info = +// (uint8_t*)&p_ioctl_rec->apr_info; +// ual_cm_apr_cb( &p_ioctl_rec->apr_rec ); +// break; +// } +// case CM_DREQ_REC: +// { +// struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec; +// +// p_ioctl_rec->dreq_rec.p_dreq_pdata = +// (uint8_t*)&p_ioctl_rec->dreq_pdata; +// ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec ); +// break; +// } +// case CM_DREP_REC: +// { +// struct _cm_drep_cb_ioctl_rec *p_ioctl_rec = +// 
&p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec; +// +// p_ioctl_rec->drep_rec.p_drep_pdata = +// (uint8_t*)&p_ioctl_rec->drep_pdata; +// ual_cm_drep_cb( &p_ioctl_rec->drep_rec ); +// break; +// } +// default: +// /* Unknown record type - just return */ +// break; +// } +//} +// +// +// static void __process_comp_cb( IN comp_cb_ioctl_info_t* p_comp_cb_info ) @@ -1136,7 +1146,7 @@ do_open_al( cl_qlist_init( &h_al->mad_list ); cl_qlist_init( &h_al->key_list ); cl_qlist_init( &h_al->query_list ); - cl_qlist_init( &h_al->conn_list ); + cl_qlist_init( &h_al->cep_list ); if( cl_spinlock_init( &h_al->mad_lock ) != CL_SUCCESS ) { @@ -1213,6 +1223,7 @@ __cb_thread_routine( switch( key ) { case UAL_BIND_CM: + //DebugBreak(); /* CM callback. */ cm_cb( err, ret_bytes, p_ov ); break; diff --git a/trunk/core/al/user/ual_mgr.h b/trunk/core/al/user/ual_mgr.h index a3e1682a..dd818670 100644 --- a/trunk/core/al/user/ual_mgr.h +++ b/trunk/core/al/user/ual_mgr.h @@ -53,9 +53,9 @@ typedef struct _ual_mgr HANDLE h_cb_port; /* File to handle CM related notifications */ - HANDLE h_cm_file; - cm_cb_ioctl_info_t cm_cb_info; - OVERLAPPED cm_ov; + //HANDLE h_cm_file; + //cm_cb_ioctl_info_t cm_cb_info; + //OVERLAPPED cm_ov; /* Thread to handle work request completions */ HANDLE h_cq_file; diff --git a/trunk/core/al/user/ual_pnp.c b/trunk/core/al/user/ual_pnp.c index 5754a8bb..a6cd6bb8 100644 --- a/trunk/core/al/user/ual_pnp.c +++ b/trunk/core/al/user/ual_pnp.c @@ -143,7 +143,7 @@ create_pnp( return status; } - /* Create a file object on which to issue all SA requests. */ + /* Create a file object on which to issue all PNP requests. 
*/ gp_pnp->h_file = ual_create_async_file( UAL_BIND_PNP ); if( gp_pnp->h_file == INVALID_HANDLE_VALUE ) { diff --git a/trunk/core/al/user/ual_sa_req.c b/trunk/core/al/user/ual_sa_req.c index 4a989d0d..1450d66c 100644 --- a/trunk/core/al/user/ual_sa_req.c +++ b/trunk/core/al/user/ual_sa_req.c @@ -161,16 +161,18 @@ free_sa_req_mgr( } - ib_api_status_t al_send_sa_req( IN al_sa_req_t *p_sa_req, IN const net64_t port_guid, IN const uint32_t timeout_ms, IN const uint32_t retry_cnt, - IN const ib_user_query_t* const p_sa_req_data ) + IN const ib_user_query_t* const p_sa_req_data, + IN const ib_al_flags_t flags ) { ib_api_status_t status; + HANDLE h_dev; + DWORD ret_bytes; AL_ENTER( AL_DBG_QUERY ); @@ -190,7 +192,12 @@ al_send_sa_req( p_sa_req->ioctl.in.ph_sa_req = &p_sa_req->hdl; p_sa_req->ioctl.in.p_status = &p_sa_req->status; - if( !DeviceIoControl( gp_sa_req_mgr->h_sa_dev, UAL_SEND_SA_REQ, + if( flags & IB_FLAGS_SYNC ) + h_dev = g_al_device; + else + h_dev = gp_sa_req_mgr->h_sa_dev; + + if( !DeviceIoControl( h_dev, UAL_SEND_SA_REQ, &p_sa_req->ioctl.in, sizeof(p_sa_req->ioctl.in), &p_sa_req->ioctl.out, sizeof(p_sa_req->ioctl.out), NULL, &p_sa_req->ov ) ) @@ -208,8 +215,18 @@ al_send_sa_req( } else { - CL_ASSERT( GetLastError() == ERROR_IO_PENDING ); - status = IB_ERROR; + /* Completed synchronously. */ + if( GetOverlappedResult( h_dev, &p_sa_req->ov, &ret_bytes, FALSE ) ) + { + status = IB_SUCCESS; + /* Process the completion. 
*/ + sa_req_cb( 0, ret_bytes, &p_sa_req->ov ); + } + else + { + sa_req_cb( GetLastError(), 0, &p_sa_req->ov ); + status = IB_ERROR; + } } AL_EXIT( AL_DBG_QUERY ); @@ -217,7 +234,6 @@ al_send_sa_req( } - void CALLBACK sa_req_cb( IN DWORD error_code, diff --git a/trunk/core/complib/cl_map.c b/trunk/core/complib/cl_map.c index cc3d3fbd..9af2471c 100644 --- a/trunk/core/complib/cl_map.c +++ b/trunk/core/complib/cl_map.c @@ -61,12 +61,440 @@ *****************************************************************************/ +#include #include #include #include #include +/****************************************************************************** +******************************************************************************* +************** ************ +************** IMPLEMENTATION OF RB MAP ************ +************** ************ +******************************************************************************* +******************************************************************************/ + + +/* + * Returns whether a given item is on the left of its parent. + */ +static boolean_t +__cl_rbmap_is_left_child( + IN const cl_rbmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + return( p_item->p_up->p_left == p_item ); +} + + +/* + * Retrieve the pointer to the parent's pointer to an item. + */ +static cl_rbmap_item_t** +__cl_rbmap_get_parent_ptr_to_item( + IN cl_rbmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + if( __cl_rbmap_is_left_child( p_item ) ) + return( &p_item->p_up->p_left ); + + CL_ASSERT( p_item->p_up->p_right == p_item ); + return( &p_item->p_up->p_right ); +} + + +/* + * Rotate a node to the left. This rotation affects the least number of links + * between nodes and brings the level of C up by one while increasing the depth + * of A one. Note that the links to/from W, X, Y, and Z are not affected. 
+ * + * R R + * | | + * A C + * / \ / \ + * W C A Z + * / \ / \ + * B Z W B + * / \ / \ + * X Y X Y + */ +static void +__cl_rbmap_rot_left( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ) +{ + cl_rbmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_right != &p_map->nil ); + + pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item ); + + /* Point R to C instead of A. */ + *pp_root = p_item->p_right; + /* Set C's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set A's right to B */ + p_item->p_right = (*pp_root)->p_left; + /* + * Set B's parent to A. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_left != &p_map->nil ) + (*pp_root)->p_left->p_up = p_item; + + /* Set C's left to A. */ + (*pp_root)->p_left = p_item; + /* Set A's parent to C. */ + p_item->p_up = *pp_root; +} + + +/* + * Rotate a node to the right. This rotation affects the least number of links + * between nodes and brings the level of A up by one while increasing the depth + * of C one. Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * C A + * / \ / \ + * A Z W C + * / \ / \ + * W B B Z + * / \ / \ + * X Y X Y + */ +static void +__cl_rbmap_rot_right( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ) +{ + cl_rbmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_left != &p_map->nil ); + + /* Point R to A instead of C. */ + pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item ); + (*pp_root) = p_item->p_left; + /* Set A's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set C's left to B */ + p_item->p_left = (*pp_root)->p_right; + /* + * Set B's parent to C. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_right != &p_map->nil ) + (*pp_root)->p_right->p_up = p_item; + + /* Set A's right to C. 
*/ + (*pp_root)->p_right = p_item; + /* Set C's parent to A. */ + p_item->p_up = *pp_root; +} + + +/* + * Balance a tree starting at a given item back to the root. + */ +static void +__cl_rbmap_ins_bal( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* p_item ) +{ + cl_rbmap_item_t* p_grand_uncle; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item != &p_map->root ); + + while( p_item->p_up->color == CL_MAP_RED ) + { + if( __cl_rbmap_is_left_child( p_item->p_up ) ) + { + p_grand_uncle = p_item->p_up->p_up->p_right; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( !__cl_rbmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_rbmap_rot_left( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_right( p_map, p_item->p_up->p_up ); + } + else + { + p_grand_uncle = p_item->p_up->p_up->p_left; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( __cl_rbmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_rbmap_rot_right( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_left( p_map, p_item->p_up->p_up ); + } + } +} + + +void +cl_rbmap_insert( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_insert_at, + IN cl_rbmap_item_t* const p_item, + IN boolean_t left ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_insert_at ); + CL_ASSERT( p_item ); + CL_ASSERT( p_map->root.p_up == &p_map->root ); + CL_ASSERT( p_map->root.color != CL_MAP_RED ); + CL_ASSERT( 
p_map->nil.color != CL_MAP_RED ); + + p_item->p_left = &p_map->nil; + p_item->p_right = &p_map->nil; + p_item->color = CL_MAP_RED; + + if( p_insert_at == cl_rbmap_end( p_map ) ) + { + p_map->root.p_left = p_item; + p_item->p_up = &p_map->root; + } + else + { + if( left ) + p_insert_at->p_left = p_item; + else + p_insert_at->p_right = p_item; + + p_item->p_up = p_insert_at; + } + + /* Increase the count. */ + p_map->count++; + + /* + * We have added depth to this section of the tree. + * Rebalance as necessary as we retrace our path through the tree + * and update colors. + */ + __cl_rbmap_ins_bal( p_map, p_item ); + + cl_rbmap_root( p_map )->color = CL_MAP_BLACK; + + /* + * Note that it is not necessary to re-color the nil node black because all + * red color assignments are made via the p_up pointer, and nil is never + * set as the value of a p_up pointer. + */ + +#ifdef _DEBUG_ + /* Set the pointer to the map in the map item for consistency checking. */ + p_item->p_map = p_map; +#endif +} + + +static void +__cl_rbmap_del_bal( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* p_item ) +{ + cl_rbmap_item_t *p_uncle; + + while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) ) + { + if( __cl_rbmap_is_left_child( p_item ) ) + { + p_uncle = p_item->p_up->p_right; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_left( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_right; + } + + if( p_uncle->p_right->color != CL_MAP_RED ) + { + if( p_uncle->p_left->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_left->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_rbmap_rot_right( p_map, p_uncle ); + p_uncle = p_item->p_up->p_right; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_right->color = CL_MAP_BLACK; + __cl_rbmap_rot_left( p_map, p_item->p_up ); + break; 
+ } + else + { + p_uncle = p_item->p_up->p_left; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_right( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_left; + } + + if( p_uncle->p_left->color != CL_MAP_RED ) + { + if( p_uncle->p_right->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_right->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_rbmap_rot_left( p_map, p_uncle ); + p_uncle = p_item->p_up->p_left; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_left->color = CL_MAP_BLACK; + __cl_rbmap_rot_right( p_map, p_item->p_up ); + break; + } + } + p_item->color = CL_MAP_BLACK; +} + + +void +cl_rbmap_remove_item( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ) +{ + cl_rbmap_item_t *p_child, *p_del_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_map == p_map ); + + if( p_item == cl_rbmap_end( p_map ) ) + return; + + if( p_item->p_right == &p_map->nil ) + { + /* The item being removed has children on at most its left. */ + p_del_item = p_item; + p_child = p_del_item->p_left; + } + else if( p_item->p_left == &p_map->nil ) + { + /* The item being removed has children on at most its right. */ + p_del_item = p_item; + p_child = p_del_item->p_right; + } + else + { + /* + * The item being removed has children on both side. + * We select the item that will replace it. After removing + * the substitute item and rebalancing, the tree will have the + * correct topology. Exchanging the substitute for the item + * will finalize the removal. + */ + p_del_item = p_item->p_right; + CL_ASSERT( p_del_item != &p_map->nil ); + while( p_del_item->p_left != &p_map->nil ) + p_del_item = p_del_item->p_left; + p_child = p_del_item->p_right; + } + + /* Decrement the item count. 
*/ + p_map->count--; + + /* + * This assignment may modify the parent pointer of the nil node. + * This is inconsequential. + */ + p_child->p_up = p_del_item->p_up; + (*__cl_rbmap_get_parent_ptr_to_item( p_del_item )) = p_child; // 2 right = 5 + + if( p_del_item->color != CL_MAP_RED ) + __cl_rbmap_del_bal( p_map, p_child ); + + /* + * Note that the splicing done below does not need to occur before + * the tree is balanced, since the actual topology changes are made by the + * preceding code. The topology is preserved by the color assignment made + * below (reader should be reminded that p_del_item == p_item in some cases). + */ + if( p_del_item != p_item ) + { + /* + * Finalize the removal of the specified item by exchanging it with + * the substitute which we removed above. + */ + p_del_item->p_up = p_item->p_up; + p_del_item->p_left = p_item->p_left; + p_del_item->p_right = p_item->p_right; + (*__cl_rbmap_get_parent_ptr_to_item( p_item )) = p_del_item; + p_item->p_right->p_up = p_del_item; + p_item->p_left->p_up = p_del_item; + p_del_item->color = p_item->color; + } + + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + +#ifdef _DEBUG_ + /* Clear the pointer to the map since the item has been removed. 
*/ + p_item->p_map = NULL; +#endif +} + + /****************************************************************************** ******************************************************************************* ************** ************ diff --git a/trunk/core/complib/user/cl_timer.c b/trunk/core/complib/user/cl_timer.c index cd059edf..5c19cb84 100644 --- a/trunk/core/complib/user/cl_timer.c +++ b/trunk/core/complib/user/cl_timer.c @@ -144,7 +144,7 @@ cl_timer_stop( } -#define SEC_TO_MICRO 1000000 // s to µs conversion +#define SEC_TO_MICRO 1000000ULL // s to µs conversion uint64_t cl_get_time_stamp( void ) @@ -157,7 +157,7 @@ cl_get_time_stamp( void ) if( !QueryPerformanceCounter( &tick_count ) ) return( 0 ); - return( tick_count.QuadPart * SEC_TO_MICRO / frequency.QuadPart ); + return( tick_count.QuadPart / (frequency.QuadPart / SEC_TO_MICRO) ); } uint32_t diff --git a/trunk/hw/mt23108/kernel/hca_data.c b/trunk/hw/mt23108/kernel/hca_data.c index e098a40f..f7228c92 100644 --- a/trunk/hw/mt23108/kernel/hca_data.c +++ b/trunk/hw/mt23108/kernel/hca_data.c @@ -1428,73 +1428,6 @@ mlnx_map_vapi_rna_type( } } -///////////////////////////////////////////////////////// -// Convert a Mellanox CQE into IBAL format -///////////////////////////////////////////////////////// -void -mlnx_conv_vapi_cqe( - IN VAPI_wc_desc_t *desc_p, - OUT ib_wc_t *wc_p ) -{ - wc_p->wr_id = desc_p->id; - - wc_p->status = mlnx_map_vapi_cqe_status(desc_p->status); - wc_p->wc_type = mlnx_map_vapi_cqe_type(desc_p->opcode); - - wc_p->length = (IB_COMP_SUCCESS == desc_p->status) ? 
desc_p->byte_len : 0; - wc_p->recv.conn.recv_opt = 0; // TBD: RC support, SE - - CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("cqe type %d length 0x%x\n", wc_p->wc_type, wc_p->length)); - - // Immediate data - if (desc_p->imm_data_valid) - { - wc_p->recv.conn.recv_opt |= IB_RECV_OPT_IMMEDIATE; - wc_p->recv.conn.immediate_data = cl_ntoh32 (desc_p->imm_data); - } - // GRH - if (desc_p->grh_flag) - { - wc_p->recv.conn.recv_opt |= IB_RECV_OPT_GRH_VALID; - } - - switch(desc_p->remote_node_addr.type) - { - case VAPI_RNA_RD: - // TBD: RD Support - CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("Unsupported RD\n")); - break; - - case VAPI_RNA_UD: - // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("Supported UD\n")); - wc_p->recv.ud.remote_qp = cl_ntoh32(desc_p->remote_node_addr.qp_ety.qp); - wc_p->recv.ud.pkey_index = (uint16_t)desc_p->pkey_ix; - wc_p->recv.ud.remote_lid = cl_ntoh16(desc_p->remote_node_addr.slid); - wc_p->recv.ud.remote_sl = desc_p->remote_node_addr.sl; - // wc_p->recv.ud.path_bits = desc_p->remote_node_addr.ee_dlid.dst_path_bits; // PATH: - wc_p->recv.ud.path_bits = 0; -#if 0 - printk ("********* MLNX *************\n"); - printk ("rem_qp = 0x%x pbits = 0x%x pkey_idx = 0x%x\n", - wc_p->recv.ud.remote_qp, wc_p->recv.ud.path_bits, - wc_p->recv.ud.pkey_index ); - printk ("GOT PBITS 0x%x\n", desc_p->remote_node_addr.ee_dlid.dst_path_bits); - printk ("*****************\n"); -#endif - - break; - - case VAPI_RNA_RAW_ETY: - // TBD: RAW ETH - break; - - case VAPI_RNA_RAW_IPV6: - // TBD: RAW IPV6 - default: - break; - } -} - ////////////////////////////////////////////////////////////// // Convert from VAPI memory-region attributes to IBAL ////////////////////////////////////////////////////////////// @@ -1788,12 +1721,6 @@ mlnx_conv_qp_modify_attr( OUT VAPI_qp_attr_t *qp_attr_p, OUT VAPI_qp_attr_mask_t *attr_mask_p) { - /* VAPI doesn't support modifying the WQE depth ever. 
*/ - if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH || - modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) - { - return IB_UNSUPPORTED; - } qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state); *attr_mask_p = QP_ATTR_QP_STATE; @@ -1822,6 +1749,13 @@ mlnx_conv_qp_modify_attr( break; case IB_QPS_RTR: + /* VAPI doesn't support modifying the WQE depth ever. */ + if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + *attr_mask_p |= QP_ATTR_RQ_PSN | QP_ATTR_DEST_QP_NUM | QP_ATTR_QP_OUS_RD_ATOM | @@ -1888,6 +1822,13 @@ mlnx_conv_qp_modify_attr( break; case IB_QPS_RTS: + /* VAPI doesn't support modifying the WQE depth ever. */ + if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + *attr_mask_p |= QP_ATTR_SQ_PSN | QP_ATTR_RETRY_COUNT | QP_ATTR_RNR_RETRY | diff --git a/trunk/hw/mt23108/kernel/hca_data.h b/trunk/hw/mt23108/kernel/hca_data.h index 83cef33b..15193593 100644 --- a/trunk/hw/mt23108/kernel/hca_data.h +++ b/trunk/hw/mt23108/kernel/hca_data.h @@ -494,11 +494,6 @@ int mlnx_map_vapi_rna_type( IN VAPI_remote_node_addr_type_t rna); -void -mlnx_conv_vapi_cqe( - IN VAPI_wc_desc_t *desc_p, - OUT ib_wc_t *wc_p ); - void mlnx_conv_vapi_mr_attr( IN ib_pd_handle_t pd_h, diff --git a/trunk/hw/mt23108/kernel/hca_direct.c b/trunk/hw/mt23108/kernel/hca_direct.c index 7148c35a..7bbdc3b2 100644 --- a/trunk/hw/mt23108/kernel/hca_direct.c +++ b/trunk/hw/mt23108/kernel/hca_direct.c @@ -392,42 +392,8 @@ mlnx_poll_cq ( hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; -#if MLNX_POLL_NATIVE return THHUL_cqm_poll4wc(hobul_p->hhul_hndl, hhul_cq_hndl, pp_free_wclist, pp_done_wclist ); -#else - for (wc_p = *pp_free_wclist; wc_p; wc_p = wc_p->p_next) { - // Terminate the completed list (MUST be here) - *pp_done_wclist = NULL; - - ret = 
THHUL_cqm_poll4cqe(hobul_p->hhul_hndl, hhul_cq_hndl, &comp_desc); - // CQ_EMPTY is not an error - if (HH_CQ_EMPTY == ret) { - status = IB_NOT_FOUND; // This is a successful completion (no entries) - break; - } - // Handle real errors - if (HH_OK != ret) { - status = IB_ERROR; - goto cleanup; - } - - status = IB_SUCCESS; - - // Convert the CQE and add to list (no memset()) - mlnx_conv_vapi_cqe( &comp_desc, wc_p ); - *pp_done_wclist = wc_p; - pp_done_wclist = &wc_p->p_next; - CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("status %d done_list 0x%p\n", comp_desc.status, *pp_done_wclist)); - } - - Update free list to point to the first unused qce - if (IB_NOT_FOUND == status && wc_p != *pp_free_wclist) - status = IB_SUCCESS; - *pp_free_wclist = wc_p; - - return status; -#endif cleanup: CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); diff --git a/trunk/hw/mt23108/kernel/hca_verbs.c b/trunk/hw/mt23108/kernel/hca_verbs.c index 55326e8f..b61dc7dd 100644 --- a/trunk/hw/mt23108/kernel/hca_verbs.c +++ b/trunk/hw/mt23108/kernel/hca_verbs.c @@ -2053,12 +2053,13 @@ mlnx_resize_cq ( if( p_umv_buf && p_umv_buf->command ) { // For user mode calls - obtain and verify the vendor information - if ((p_umv_buf->input_size - sizeof (u_int32_t)) != hobul_p->cq_ul_resources_sz || - NULL == p_umv_buf->p_inout_buf) { - status = IB_INVALID_PARAMETER; - goto cleanup_locked; - } - cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf ) + { + status = IB_INVALID_PARAMETER; + goto cleanup_locked; + } + cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf; } else { // for kernel mode calls - obtain the saved app resources. Use prep->call->done sequence @@ -2259,7 +2260,7 @@ setup_ci_interface( IN const ib_net64_t ca_guid, IN OUT ci_interface_t *p_interface ) { - cl_memclr(p_interface, sizeof(p_interface)); + cl_memclr(p_interface, sizeof(*p_interface)); /* Guid of the CA. 
*/ p_interface->guid = ca_guid; diff --git a/trunk/hw/mt23108/user/mlnx_ual_cq.c b/trunk/hw/mt23108/user/mlnx_ual_cq.c index 282529e0..32473df4 100644 --- a/trunk/hw/mt23108/user/mlnx_ual_cq.c +++ b/trunk/hw/mt23108/user/mlnx_ual_cq.c @@ -357,7 +357,7 @@ mlnx_pre_resize_cq ( ("After resize_cq_prep *p_size = %d\n", *p_size)); p_umv_buf->p_inout_buf = p_cq_ul_resources; - p_umv_buf->input_size = (uint32_t)p_hobul->p_hca_ul_info->cq_ul_resources_sz + sizeof(uint32_t); + p_umv_buf->input_size = (uint32_t)p_hobul->p_hca_ul_info->cq_ul_resources_sz; p_umv_buf->output_size = p_umv_buf->input_size; p_umv_buf->command = TRUE; @@ -404,7 +404,7 @@ mlnx_post_resize_cq ( break; } else if (p_umv_buf->output_size != - (p_hobul->p_hca_ul_info->cq_ul_resources_sz + sizeof(uint32_t)) ) + (p_hobul->p_hca_ul_info->cq_ul_resources_sz) ) { CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, ("Bad priv buf size %ld\n", p_umv_buf->output_size)); diff --git a/trunk/hw/mt23108/user/mlnx_ual_osbypass.c b/trunk/hw/mt23108/user/mlnx_ual_osbypass.c index c02ed4c9..3637bd3c 100644 --- a/trunk/hw/mt23108/user/mlnx_ual_osbypass.c +++ b/trunk/hw/mt23108/user/mlnx_ual_osbypass.c @@ -265,68 +265,6 @@ map_mtoi_cqe_type ( } -void -map_mtoi_wcqe ( - IN VAPI_wc_desc_t *p_m_cqe, - OUT ib_wc_t *p_i_cqe) -{ - p_i_cqe->wr_id = p_m_cqe->id; - p_i_cqe->status = map_mtoi_cqe_status (p_m_cqe->status); - p_i_cqe->wc_type = map_mtoi_cqe_type (p_m_cqe->opcode); - p_i_cqe->length = (IB_COMP_SUCCESS == p_m_cqe->status) ? 
- p_m_cqe->byte_len : 0; - - CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, - ("cqe type %d length 0x%x status %d\n", - p_i_cqe->wc_type, p_i_cqe->length, p_i_cqe->status)); - - /* - * FIXME: RC case - */ - p_i_cqe->recv.conn.recv_opt = 0; - - if (p_m_cqe->imm_data_valid) - { - p_i_cqe->recv.conn.recv_opt |= IB_RECV_OPT_IMMEDIATE; - p_i_cqe->recv.conn.immediate_data = CL_NTOH32 (p_m_cqe->imm_data); - } - - if (p_m_cqe->grh_flag) - { - p_i_cqe->recv.conn.recv_opt |= IB_RECV_OPT_GRH_VALID; - } - - switch (p_m_cqe->remote_node_addr.type) - { - case VAPI_RNA_UD: - p_i_cqe->recv.ud.remote_qp = - CL_NTOH32 (p_m_cqe->remote_node_addr.qp_ety.qp); - p_i_cqe->recv.ud.pkey_index = (uint16_t)p_m_cqe->pkey_ix; - p_i_cqe->recv.ud.remote_lid = - CL_NTOH16 (p_m_cqe->remote_node_addr.slid); - p_i_cqe->recv.ud.remote_sl = p_m_cqe->remote_node_addr.sl; - p_i_cqe->recv.ud.path_bits = 0; - break; - - case VAPI_RNA_RD: - CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RD\n")); - break; - - case VAPI_RNA_RAW_ETY: - CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RAW_ETY\n")); - break; - - case VAPI_RNA_RAW_IPV6: - CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unsupported RAW_IPV6\n")); - break; - - default: - CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, ("Unknown type\n")); - break; - } -} - - static VAPI_mrw_acl_t map_itom_access_ctrl ( IN ib_access_t i_acl) @@ -492,50 +430,10 @@ mlnx_poll_cq ( status = IB_INVALID_PARAMETER; return status; } -#if TRUE - status = THHUL_cqm_poll4wc(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl, - pp_free_wclist, pp_done_wclist ); -#else - for (p_i_cqe = *pp_free_wclist; p_i_cqe; p_i_cqe = p_i_cqe->p_next) - { - *pp_done_wclist = NULL; - - hh_ret = THHUL_cqm_poll4cqe (p_hobul->hhul_hca_hndl, - p_cq_info->hhul_cq_hndl, - &m_cqe); - if (HH_CQ_EMPTY == hh_ret) - { - status = IB_NOT_FOUND; - break; - } - - /* - * Errors cases - */ - if (HH_OK != hh_ret) - { - CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, - ("poll4cqe get error status %d\n", hh_ret)); - 
status = IB_ERROR; - return status; - } - map_mtoi_wcqe (&m_cqe, p_i_cqe); - *pp_done_wclist = p_i_cqe; - pp_done_wclist = (ib_wc_t **)&p_i_cqe->p_next; - status = IB_SUCCESS; - } + status = THHUL_cqm_poll4wc(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl, + pp_free_wclist, pp_done_wclist ); - /* - * Update free list to point to the first unused cqe - */ - if (IB_NOT_FOUND == status && p_i_cqe != *pp_free_wclist) - { - status = IB_SUCCESS; - } - - *pp_free_wclist = p_i_cqe; -#endif FUNC_EXIT; return status; } diff --git a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c index 845a4c79..ef80816d 100644 --- a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c +++ b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c @@ -3130,7 +3130,7 @@ HH_ret_t THH_hob_get_gid_tbl_local( HH_hca_hndl_t hca_hndl, MTL_DEBUG4("THH_hob_get_gid_tbl_local: hca_hndl=0x%p, port= %d, return table len = %d\n", hca_hndl, port, tbl_len_in); - if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK && use_mad_query_for_gid_prefix) { MTL_ERROR1("THH_hob_get_gid_tbl: NOT IN TASK CONTEXT)\n"); return HH_ERR; } @@ -3173,19 +3173,19 @@ HH_ret_t THH_hob_get_gid_tbl_local( HH_hca_hndl_t hca_hndl, return HH_EINVAL; } - mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE); - if ( !mad_frame_in ) { - return HH_EAGAIN; - } - mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE); - if ( !mad_frame_out ) { - FREE(mad_frame_in); - return HH_EAGAIN; - } - /* get GID table using MAD commands in THH_cmd object */ if (use_mad_query_for_gid_prefix == TRUE) { + + mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_in ) { + return HH_EAGAIN; + } + mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_out ) { + FREE(mad_frame_in); + return HH_EAGAIN; + } /* First, get the GID prefix from via MAD query */ memset(mad_frame_in, 0, sizeof(mad_frame_in)); memset(mad_frame_out, 0, 
sizeof(mad_frame_out)); @@ -3245,30 +3245,24 @@ HH_ret_t THH_hob_get_gid_tbl_local( HH_hca_hndl_t hca_hndl, } } } + FREE(mad_frame_out); + FREE(mad_frame_in); } else { memset(&port_info, 0, sizeof(port_info)); hh_ret = THH_hob_get_qpm ( thh_hob_p, &qpm ); if (hh_ret != HH_OK) { MTL_ERROR2( "THH_hob_get_qpm: invalid QPM handle (ret= %d)\n", hh_ret); - FREE(mad_frame_out); - FREE(mad_frame_in); return HH_EINVAL; } /*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/ hh_ret = THH_qpm_get_all_sgids(qpm,port,(u_int8_t)num_guids, param_gid_p); if (hh_ret != HH_OK) { MTL_ERROR2( "THH_qpm_get_all_sgids failed (ret= %d)\n", hh_ret); - FREE(mad_frame_out); - FREE(mad_frame_in); return HH_EINVAL; } - FREE(mad_frame_out); - FREE(mad_frame_in); return HH_OK; } - FREE(mad_frame_out); - FREE(mad_frame_in); return HH_OK; } /* THH_get_gid_tbl */ /****************************************************************************** diff --git a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c index 16635716..942ef9fd 100644 --- a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c +++ b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c @@ -1243,185 +1243,6 @@ static MT_bool cq_transition_to_resized_buf( #define CQE_OPCODE_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode)) #define CQE_OPCODE_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,opcode)>>2 #define CQE_OPCODE_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode) & MASK32(5)) -HH_ret_t THHUL_cqm_poll4cqe( - /*IN*/ HHUL_hca_hndl_t hca_hndl, - /*IN*/ HHUL_cq_hndl_t cq, - /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p -) -{ - THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq; - volatile u_int32_t *cur_cqe; - u_int32_t wqe_addr_32lsb,next_wqe_addr_32lsb; - IB_wqpn_t qpn; - u_int8_t opcode; - u_int8_t dbd_bit; - VAPI_special_qp_t qp_type; - VAPI_ts_type_t qp_ts_type; - 
u_int32_t i,dbd_cnt; - HH_ret_t rc; - u_int32_t cqe_cpy[CQE_SZ>>2]; /* CQE copy */ - /* The CQE copy is required for 2 reasons: - * 1) Hold in CPU endianess. - * 2) Free real CQE as soon as possible in order to release CQ lock quickly. - */ - - if (MOSAL_EXPECT_FALSE(thhul_cq_p == NULL)) { - MTL_ERROR1("THHUL_cqm_poll4cqe: NULL CQ handle.\n"); - return HH_EINVAL_CQ_HNDL; - } - - MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock)); - - /* Check if CQE at consumer index is valid */ - cur_cqe= (volatile u_int32_t *) - (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); - if (is_cqe_hw_own(cur_cqe) && /* CQE is still in HW ownership */ - (!cq_transition_to_resized_buf(thhul_cq_p, &cur_cqe)) ) { - - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); -#if 0 - THHUL_cqm_dump_cq(cq); -#endif - return HH_CQ_EMPTY; - } - - /* Remove Copy of all cqe, copy as nedded only */ -#if 0 /* original code */ - /* Make CQE copy in correct endianess */ - for (i= 0; i < (CQE_SZ>>2); i++) { - cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]); - } - /* Extract QP/WQE context fields from the CQE */ - wqe_addr_32lsb= (cqe_cpy[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2] & - (~MASK32(CQE_WQE_ADR_BIT_SZ)) ); - qpn= (cqe_cpy[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_qpn)>>2] & MASK32(24) ); - vapi_cqe_p->local_qp_num= qpn; - /* new CQE: completion status is taken from "opcode" field */ - opcode= MT_EXTRACT_ARRAY32(cqe_cpy, - MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode), - MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode)); -#endif - - /* Extract QP/WQE context fields from the CQE */ - /* Byte 6 */ - wqe_addr_32lsb= MOSAL_be32_to_cpu(cur_cqe[CQE_WQE_ADDR_BYTE_OFFSET]) & - CQE_WQE_ADDR_BIT_MASK; - - /* Byte 0*/ - qpn= MOSAL_be32_to_cpu(cur_cqe[CQE_MY_QPN_BYTE_OFFSET]) & CQE_MY_QPN_BYTE_BIT_MASK; - - /* Byte 1 */ - cqe_cpy[CQE_MY_EE_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_MY_EE_DWORD_OFFSET]); - /* Byte 2 */ - 
cqe_cpy[CQE_RQPN_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RQPN_DWORD_OFFSET]); - /* Byte 3 */ - cqe_cpy[CQE_RLID_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RLID_DWORD_OFFSET]); - /* Byte 7 Convert S,Opcode,Owner fileld to Be */ - cqe_cpy[CQE_S_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_S_DWORD_OFFSET]); - - /* Byte 4 */ - vapi_cqe_p->imm_data = MOSAL_be32_to_cpu(cur_cqe[CQE_IMMEDIATE_DWORD_OFFSET]); - /* Byte 5 */ - vapi_cqe_p->byte_len= MOSAL_be32_to_cpu(cur_cqe[CQE_BYTE_CNT_DWORD_OFFSET]); - - /* new CQE: completion status is taken from "opcode" field */ - - opcode=(cqe_cpy[CQE_OPCODE_DWORD_OFFSET]>>CQE_OPCODE_SHIFT) & CQE_OPCODE_BIT_MASK; - - - if (MOSAL_EXPECT_TRUE((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK)) { /* Completed OK */ - MTPERF_TIME_START(free_cqe); - free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */ - MTPERF_TIME_END(free_cqe); - - /* DEBUG: Sanity check that the same WQE is not used twice simultaneosly */ -#ifdef THHUL_CQM_DEBUG_WQE_REUSE - /* Get next CQE and check if valid and NDA equals freed CQE's */ - cur_cqe= (volatile u_int32_t *) - (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); - if ((!is_cqe_hw_own(cur_cqe)) && - ( (MOSAL_be32_to_cpu( - cur_cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2]) & - (~MASK32(CQE_WQE_ADR_BIT_SZ)) ) == wqe_addr_32lsb) ){ - MTL_ERROR1(MT_FLFMT("%s: Duplicate NDA on next CQE (NDA=0x%X , consumer index=%u,%u)"), - __func__, wqe_addr_32lsb, - thhul_cq_p->cur_buf.consumer_index-1, thhul_cq_p->cur_buf.consumer_index); - THHUL_cqm_dump_cq(cq); - } -#endif - -#ifndef IVAPI_THH - rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count)); -#else - rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),NULL); -#endif - - 
MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); - - if (MOSAL_EXPECT_FALSE(rc != HH_OK)) { - MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n"); - for (i= 0; i < (CQE_SZ>>2); i++) { - MTL_ERROR1(MT_FLFMT("CQ[0x%X][%u][%u]=0x%X"),thhul_cq_p->cq_num, - (thhul_cq_p->cur_buf.consumer_index - 1) & MASK32(thhul_cq_p->cur_buf.log2_num_o_cqes), - i, cqe_cpy[i]); - } - return HH_EFATAL; /* unexpected error */ - } - /* Extract the rest of the CQE fields into vapi_cqe_p*/ - rc= extract_cqe_new(cqe_cpy,vapi_cqe_p,qp_type,qp_ts_type,opcode); - vapi_cqe_p->status= VAPI_SUCCESS; - - } else { /* Completion with error */ - - /* Make CQE copy in correct endianess */ - for (i= 0; i < (CQE_SZ>>2); i++) { - cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]); - } - - MTL_DEBUG4("THHUL_cqm_poll4cqe: completion with error: cq=%d consumer_index=%d\n", - thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index); - DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe); - rc= THHUL_qpm_comp_err(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),&next_wqe_addr_32lsb,&dbd_bit); - if (rc != HH_OK) { - MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP (QPn=0x%X , CQn=0x%X).\n", - qpn, thhul_cq_p->cq_num); - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); - return HH_EFATAL; /* unexpected error */ - } - vapi_cqe_p->status= decode_error_syndrome((tavor_if_comp_status_t)MT_EXTRACT_ARRAY32(cqe_cpy, - CQE_ERROR_SYNDROM_BIT_OFFSET, CQE_ERROR_SYNDROM_BIT_SIZE) ); - vapi_cqe_p->vendor_err_syndrome= MT_EXTRACT_ARRAY32(cqe_cpy, - CQE_ERROR_VENDOR_SYNDROM_BIT_OFFSET, CQE_ERROR_VENDOR_SYNDROM_BIT_SIZE); - dbd_cnt= MT_EXTRACT_ARRAY32(cqe_cpy,CQE_ERROR_DBDCNT_BIT_OFFSET, CQE_ERROR_DBDCNT_BIT_SIZE); - if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) || /* End of WQE chain */ - ((dbd_cnt + 1 - dbd_bit) == 0) ) { /* or dbd counter reached 0 */ - if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) && (dbd_cnt > 0)) { - 
MTL_ERROR1(MT_FLFMT("%s: CQ[0x%X]:CQE[0x%X]: Reached end of chain while dbd_cnt==%u"), - __func__, thhul_cq_p->cq_num, thhul_cq_p->cur_buf.consumer_index, dbd_cnt); - } - MTPERF_TIME_START(free_cqe); - free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */ - MTPERF_TIME_END(free_cqe); - } else { - recycle_cqe(cur_cqe, next_wqe_addr_32lsb, dbd_cnt - dbd_bit); - } - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); - /* Only WQE-ID, free_res_count and status are required for completion with error. - * No other CQE fields are extracted (see IB-spec. 11.4.2.1). - * Even though, for the sake of some legacy code: - * ...putting an opcode to distinguish completion of SQ from RQ*/ - if (opcode == CQE_ERROR_ON_SQ) { - vapi_cqe_p->opcode= VAPI_CQE_SQ_SEND_DATA; - } else { /* receive queue completion */ - vapi_cqe_p->opcode= VAPI_CQE_RQ_SEND_DATA; - } - } - - return rc; -} #ifdef WIN32 /* Successful completion */ @@ -1822,185 +1643,6 @@ static void rearm_cq(THHUL_cq_t *cq_p, MT_bool solicitedNotification) { } -/* This code is mainly from poll4cqe with rearm_cqe if next_cqe available - */ -HH_ret_t THHUL_cqm_poll_and_rearm_cq( - /*IN*/ HHUL_hca_hndl_t hca_hndl, - /*IN*/ HHUL_cq_hndl_t cq, - /*IN*/ int solicitedNotification, - /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p -) -{ - - THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq; - volatile u_int32_t *cur_cqe; - u_int32_t wqe_addr_32lsb,next_wqe_addr_32lsb; - IB_wqpn_t qpn; - u_int8_t opcode; - u_int8_t dbd_bit; - VAPI_special_qp_t qp_type; - VAPI_ts_type_t qp_ts_type; - u_int32_t i,dbd_cnt; - HH_ret_t rc; - u_int32_t cqe_cpy[CQE_SZ>>2]; /* CQE copy */ - /* The CQE copy is required for 2 reasons: - * 1) Hold in CPU endianess. - * 2) Free real CQE as soon as possible in order to release CQ lock quickly. 
- */ - - if (MOSAL_EXPECT_FALSE(thhul_cq_p == NULL)) { - MTL_ERROR1("THHUL_cqm_poll4cqe: NULL CQ handle.\n"); - return HH_EINVAL_CQ_HNDL; - } - - MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock)); - - /* Check if CQE at consumer index is valid */ - cur_cqe= (volatile u_int32_t *) - (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); - if (is_cqe_hw_own(cur_cqe)) { /* CQE is still in HW ownership */ - - rearm_cq(thhul_cq_p, (MT_bool)solicitedNotification); - /* Tavor actually solves the race condition where the s/w may have missed the - next valid entry being written, just prior to rearming the CQ. So we really - don't need to repoll the cq entry, since Tavor guarantees that we will see - an interrupt if it happened to go valid before or after rearming the CQ - */ - - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); -#if 0 - THHUL_cqm_dump_cq(cq); -#endif - return HH_CQ_EMPTY; - } - - - /* Remove Copy of all cqe, copy as nedded only */ -#if 0 - /* Make CQE copy in correct endianess */ - for (i= 0; i < (CQE_SZ>>2); i++) { - cqe_cpy[i]= MOSAL_be32_to_cpu(cur_cqe[i]); - } -#endif - - /* Extract QP/WQE context fields from the CQE */ - - wqe_addr_32lsb= MOSAL_be32_to_cpu(cur_cqe[CQE_WQE_ADDR_BYTE_OFFSET]) & - CQE_WQE_ADDR_BIT_MASK; - - qpn= MOSAL_be32_to_cpu(cur_cqe[CQE_MY_QPN_BYTE_OFFSET]) & CQE_MY_QPN_BYTE_BIT_MASK; - - cqe_cpy[CQE_MY_EE_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_MY_EE_DWORD_OFFSET]); - cqe_cpy[CQE_RQPN_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RQPN_DWORD_OFFSET]); - cqe_cpy[CQE_RLID_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_RLID_DWORD_OFFSET]); - cqe_cpy[CQE_S_DWORD_OFFSET] = MOSAL_be32_to_cpu(cur_cqe[CQE_S_DWORD_OFFSET]); - - vapi_cqe_p->imm_data = MOSAL_be32_to_cpu(cur_cqe[CQE_IMMEDIATE_DWORD_OFFSET]); - vapi_cqe_p->byte_len= MOSAL_be32_to_cpu(cur_cqe[CQE_BYTE_CNT_DWORD_OFFSET]); - - /* new CQE: completion status is taken from "opcode" field */ - - 
opcode=(cqe_cpy[CQE_OPCODE_DWORD_OFFSET]>>CQE_OPCODE_SHIFT) & CQE_OPCODE_BIT_MASK; - - - if (MOSAL_EXPECT_TRUE((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK)) { /* Completed OK */ - HH_ret_t rc_rearm; - volatile u_int32_t *next_cqe; - - MTPERF_TIME_START(free_cqe); - free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */ - MTPERF_TIME_END(free_cqe); - - // form pointer to next cqe that would be serviced next - next_cqe= (volatile u_int32_t *) - (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); - - - if (is_cqe_hw_own(next_cqe)) { /* CQE is still in HW ownership */ - rearm_cq(thhul_cq_p, (MT_bool)solicitedNotification); - // check again - next_cqe= (volatile u_int32_t *) - (thhul_cq_p->cur_buf.cqe_buf_base + (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ)); - if (is_cqe_hw_own(next_cqe)) { /* CQE is still in HW ownership */ - rc_rearm = HH_COMPLETED; - } else { - rc_rearm = HH_POLL_NEEDED; - } - } - else - { - rc_rearm = HH_OK; - } - - - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); -#ifndef IVAPI_THH - rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count)); -#else - rc= THHUL_qpm_comp_ok(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &qp_type,&qp_ts_type,&(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),NULL); -#endif - - if (MOSAL_EXPECT_FALSE(rc != HH_OK)) { - MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n"); - DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe); - return rc; - } - /* Extract the rest of the CQE fields into vapi_cqe_p*/ - - rc= extract_cqe_new(cqe_cpy,vapi_cqe_p,qp_type,qp_ts_type,opcode); - vapi_cqe_p->status= VAPI_SUCCESS; - if(MOSAL_EXPECT_FALSE(rc != HH_OK)) { - return rc; - } - else - return rc_rearm; - - - } else { /* Completion with error */ - /* Make CQE copy in correct endianess */ - for (i= 0; i < (CQE_SZ>>2); i++) { - cqe_cpy[i]= 
MOSAL_be32_to_cpu(cur_cqe[i]); - } - MTL_DEBUG4("THHUL_cqm_poll4cqe: completion with error: cq=%d consumer_index=%d\n", - thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index); - DUMP_CQE(thhul_cq_p->cq_num,thhul_cq_p->cur_buf.consumer_index,cur_cqe); - rc= THHUL_qpm_comp_err(thhul_cq_p->qpm, qpn, wqe_addr_32lsb, - &(vapi_cqe_p->id),&(vapi_cqe_p->free_res_count),&next_wqe_addr_32lsb,&dbd_bit); - if (rc != HH_OK) { - MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n"); - return rc; - } - vapi_cqe_p->status= decode_error_syndrome(MT_EXTRACT_ARRAY32(cqe_cpy, - CQE_ERROR_SYNDROM_BIT_OFFSET, CQE_ERROR_SYNDROM_BIT_SIZE) ); - vapi_cqe_p->vendor_err_syndrome= MT_EXTRACT_ARRAY32(cqe_cpy, - CQE_ERROR_VENDOR_SYNDROM_BIT_OFFSET, CQE_ERROR_VENDOR_SYNDROM_BIT_SIZE); - dbd_cnt= MT_EXTRACT_ARRAY32(cqe_cpy,CQE_ERROR_DBDCNT_BIT_OFFSET, CQE_ERROR_DBDCNT_BIT_SIZE); - if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) || /* End of WQE chain */ - ((dbd_cnt + 1 - dbd_bit) == 0) ) { /* or dbd counter reached 0 */ - MTPERF_TIME_START(free_cqe); - free_cqe(thhul_cq_p,cur_cqe); /* Free original CQE and update consumer index */ - MTPERF_TIME_END(free_cqe); - } else { - recycle_cqe(cur_cqe, next_wqe_addr_32lsb, dbd_cnt - dbd_bit); - } - MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); - /* Only WQE-ID, free_res_count and status are required for completion with error. - * No other CQE fields are extracted (see IB-spec. 11.4.2.1). 
- * Even though, for the sake of some legacy code: - * ...putting an opcode to distinguish completion of SQ from RQ*/ - if (opcode == CQE_ERROR_ON_SQ) { - vapi_cqe_p->opcode= VAPI_CQE_SQ_SEND_DATA; - } else { /* receive queue completion */ - vapi_cqe_p->opcode= VAPI_CQE_RQ_SEND_DATA; - } - } - - return rc; - -} - HH_ret_t THHUL_cqm_req_comp_notif( /*IN*/ HHUL_hca_hndl_t hca_hndl, diff --git a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h index 5b05e3e1..f0271689 100644 --- a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h +++ b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h @@ -94,13 +94,6 @@ DLL_API HH_ret_t THHUL_cqm_cq_cleanup( /*IN*/ HHUL_srq_hndl_t srq ); - -DLL_API HH_ret_t THHUL_cqm_poll4cqe( - /*IN*/ HHUL_hca_hndl_t hca_hndl, - /*IN*/ HHUL_cq_hndl_t cq, - /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p -); - #ifdef WIN32 #include DLL_API ib_api_status_t @@ -117,13 +110,6 @@ THHUL_cqm_count_cqe( OUT uint32_t* const p_n_cqes ); #endif -DLL_API HH_ret_t THHUL_cqm_poll_and_rearm_cq( - /*IN*/ HHUL_hca_hndl_t hca_hndl, - /*IN*/ HHUL_cq_hndl_t cq, - /*IN*/ int solicitedNotification, - /*OUT*/ VAPI_wc_desc_t *vapi_cqe_p -); - DLL_API HH_ret_t THHUL_cqm_peek_cq( /*IN*/ HHUL_hca_hndl_t hca_hndl, /*IN*/ HHUL_cq_hndl_t cq, diff --git a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c index f2324640..f49bf701 100644 --- a/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c +++ b/trunk/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c @@ -83,8 +83,8 @@ static HHUL_if_ops_t thhul_ops= THHUL_cqm_create_cq_done /* HHULIF_create_cq_done */, THHUL_cqm_resize_cq_prep /* HHULIF_resize_cq_prep */, THHUL_cqm_resize_cq_done /* HHULIF_resize_cq_done */, - THHUL_cqm_poll4cqe /* HHULIF_poll4cqe */, - THHUL_cqm_poll_and_rearm_cq /* HHULIF_poll_and_rearm_cq */, + NULL /* HHULIF_poll4cqe */, + NULL /* 
HHULIF_poll_and_rearm_cq */, THHUL_cqm_peek_cq /* HHULIF_peek_cq */, THHUL_cqm_req_comp_notif /* HHULIF_req_comp_notif */, THHUL_cqm_req_ncomp_notif /* HHULIF_req_ncomp_notif */, diff --git a/trunk/inc/complib/cl_qmap.h b/trunk/inc/complib/cl_qmap.h index 6cbdbdbf..4981b4e9 100644 --- a/trunk/inc/complib/cl_qmap.h +++ b/trunk/inc/complib/cl_qmap.h @@ -45,6 +45,7 @@ #define _CL_QMAP_H_ +#include #include @@ -97,35 +98,6 @@ *********/ -/****i* Component Library: Quick Map/cl_map_color_t -* NAME -* cl_map_color_t -* -* DESCRIPTION -* The cl_map_color_t enumerated type is used to note the color of -* nodes in a map. -* -* SYNOPSIS -*/ -typedef enum _cl_map_color -{ - CL_MAP_RED, - CL_MAP_BLACK - -} cl_map_color_t; -/* -* VALUES -* CL_MAP_RED -* The node in the map is red. -* -* CL_MAP_BLACK -* The node in the map is black. -* -* SEE ALSO -* Quick Map, cl_map_item_t -*********/ - - /****s* Component Library: Quick Map/cl_map_item_t * NAME * cl_map_item_t diff --git a/trunk/inc/complib/cl_rbmap.h b/trunk/inc/complib/cl_rbmap.h new file mode 100644 index 00000000..7e73fb42 --- /dev/null +++ b/trunk/inc/complib/cl_rbmap.h @@ -0,0 +1,593 @@ +/*++ +Copyright © InfiniCon Systems, Inc. All rights reserved. + +THIS SOFTWARE IS PROVIDED BY INFINICON SYSTEMS, INC. ("INFINICON") TO EACH +PERSON OR COMPANY ("RECIPIENT") ON AN "AS IS" BASIS. ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL INFINICON BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED OR ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +Any agreements between InfiniCon and the Recipient shall apply to Recipient's +use of the Software. +--*/ + + +/* + * Abstract: + * Declaration of primitive red/black map, a red/black tree where the caller + * always provides all necessary storage. + * + * This tree implementation exposes functions required for the client to + * manually walk the map, allowing clients to implement various methods + * of comparisson. + * + * Environment: + * All + * + * $Revision$ + */ + + +#ifndef _CL_RBMAP_H_ +#define _CL_RBMAP_H_ + + +#include + + +/****h* Component Library/RB Map +* NAME +* RB Map +* +* DESCRIPTION +* RB map implements a binary tree that stores user provided cl_rbmap_item_t +* structures. Each item stored in a RB map has a unique key +* (duplicates are not allowed). RB map provides the ability to +* efficiently search for an item given a key. +* +* RB map does not allocate any memory, and can therefore not fail +* any operations due to insufficient memory. RB map can thus be useful +* in minimizing the error paths in code. +* +* RB map is not thread safe, and users must provide serialization when +* adding and removing items from the map. +* +* The RB map functions operate on a cl_rbmap_t structure which should be +* treated as opaque and should be manipulated only through the provided +* functions. 
+* +* SEE ALSO +* Structures: +* cl_rbmap_t, cl_rbmap_item_t +* +* Initialization: +* cl_rbmap_init +* +* Iteration: +* cl_rbmap_root, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up +* +* Manipulation: +* cl_rbmap_insert, cl_rbmap_get, cl_rbmap_remove_item, cl_rbmap_remove, +* cl_rbmap_reset, cl_rbmap_merge, cl_rbmap_delta +* +* Search: +* cl_rbmap_apply_func +* +* Attributes: +* cl_rbmap_count, cl_is_rbmap_empty, +*********/ + + +/****i* Component Library: RB Map/cl_map_color_t +* NAME +* cl_map_color_t +* +* DESCRIPTION +* The cl_map_color_t enumerated type is used to note the color of +* nodes in a map. +* +* SYNOPSIS +*/ +typedef enum _cl_map_color +{ + CL_MAP_RED, + CL_MAP_BLACK + +} cl_map_color_t; +/* +* VALUES +* CL_MAP_RED +* The node in the map is red. +* +* CL_MAP_BLACK +* The node in the map is black. +* +* SEE ALSO +* RB Map, cl_rbmap_item_t +*********/ + + +/****s* Component Library: RB Map/cl_rbmap_item_t +* NAME +* cl_rbmap_item_t +* +* DESCRIPTION +* The cl_rbmap_item_t structure is used by maps to store objects. +* +* The cl_rbmap_item_t structure should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_rbmap_item +{ + struct _cl_rbmap_item *p_left; + struct _cl_rbmap_item *p_right; + struct _cl_rbmap_item *p_up; + cl_map_color_t color; +#ifdef _DEBUG_ + struct _cl_rbmap *p_map; +#endif + +} cl_rbmap_item_t; +/* +* FIELDS +* p_left +* Pointer to the map item that is a child to the left of the node. +* +* p_right +* Pointer to the map item that is a child to the right of the node. +* +* p_up +* Pointer to the map item that is the parent of the node. +* +* color +* Indicates whether a node is red or black in the map. +* +* NOTES +* None of the fields of this structure should be manipulated by users, as +* they are crititcal to the proper operation of the map in which they +* are stored. 
+* +* To allow storing items in either a quick list, a quick pool, or a quick +* map, the map implementation guarantees that the map item can be safely +* cast to a pool item used for storing an object in a quick pool, or cast to +* a list item used for storing an object in a quick list. This removes the +* need to embed a map item, a list item, and a pool item in objects that need +* to be stored in a quick list, a quick pool, and a RB map. +* +* SEE ALSO +* RB Map, cl_rbmap_insert, cl_rbmap_key, cl_pool_item_t, cl_list_item_t +*********/ + + +/****s* Component Library: RB Map/cl_rbmap_t +* NAME +* cl_rbmap_t +* +* DESCRIPTION +* Quick map structure. +* +* The cl_rbmap_t structure should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_rbmap +{ + cl_rbmap_item_t root; + cl_rbmap_item_t nil; + cl_state_t state; + size_t count; + +} cl_rbmap_t; +/* +* PARAMETERS +* root +* Map item that serves as root of the map. The root is set up to +* always have itself as parent. The left pointer is set to point to +* the item at the root. +* +* nil +* Map item that serves as terminator for all leaves, as well as providing +* the list item used as quick list for storing map items in a list for +* faster traversal. +* +* state +* State of the map, used to verify that operations are permitted. +* +* count +* Number of items in the map. +* +* SEE ALSO +* RB Map +*********/ + + +#ifdef __cplusplus +extern "C" { +#endif + + +/****f* Component Library: RB Map/cl_rbmap_count +* NAME +* cl_rbmap_count +* +* DESCRIPTION +* The cl_rbmap_count function returns the number of items stored +* in a RB map. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_rbmap_count( + IN const cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + return( p_map->count ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure whose item count to return. 
+* +* RETURN VALUE +* Returns the number of items stored in the map. +* +* SEE ALSO +* RB Map, cl_is_rbmap_empty +*********/ + + +/****f* Component Library: RB Map/cl_is_rbmap_empty +* NAME +* cl_is_rbmap_empty +* +* DESCRIPTION +* The cl_is_rbmap_empty function returns whether a RB map is empty. +* +* SYNOPSIS +*/ +CL_INLINE boolean_t CL_API +cl_is_rbmap_empty( + IN const cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + return( p_map->count == 0 ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure to test for emptiness. +* +* RETURN VALUES +* TRUE if the RB map is empty. +* +* FALSE otherwise. +* +* SEE ALSO +* RB Map, cl_rbmap_count, cl_rbmap_reset +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_reset +* NAME +* cl_rbmap_reset +* +* DESCRIPTION +* The cl_rbmap_reset function removes all items in a RB map, +* leaving it empty. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_rbmap_reset( + IN cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + p_map->root.p_left = &p_map->nil; + p_map->count = 0; +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure to empty. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* RB Map, cl_rbmap_remove, cl_rbmap_remove_item +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_init +* NAME +* cl_rbmap_init +* +* DESCRIPTION +* The cl_rbmap_init function initialized a RB map for use. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_rbmap_init( + IN cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + + /* special setup for the root node */ + p_map->root.p_left = &p_map->nil; + p_map->root.p_right = &p_map->nil; + p_map->root.p_up = &p_map->root; + p_map->root.color = CL_MAP_BLACK; + + /* Setup the node used as terminator for all leaves. 
*/ + p_map->nil.p_left = &p_map->nil; + p_map->nil.p_right = &p_map->nil; + p_map->nil.p_up = &p_map->nil; + p_map->nil.color = CL_MAP_BLACK; + +#ifdef _DEBUG_ + p_map->root.p_map = p_map; + p_map->nil.p_map = p_map; +#endif + + p_map->state = CL_INITIALIZED; + + p_map->count = 0; +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure to initialize. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* Allows calling RB map manipulation functions. +* +* SEE ALSO +* RB Map, cl_rbmap_insert, cl_rbmap_remove +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_root +* NAME +* cl_rbmap_root +* +* DESCRIPTION +* The cl_rbmap_root function returns the root of a RB map. +* +* SYNOPSIS +*/ +CL_INLINE cl_rbmap_item_t* const CL_API +cl_rbmap_root( + IN const cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + return( p_map->root.p_left ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure whose root to return. +* +* RETURN VALUE +* Pointer to the end of the map. +* +* NOTES +* cl_rbmap_end is useful for determining the validity of map items returned +* by cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, or cl_rbmap_prev. If the map +* item pointer returned by any of these functions compares to the end, the +* end of the map was encoutered. +* When using cl_rbmap_head or cl_rbmap_tail, this condition indicates that +* the map is empty. +* +* SEE ALSO +* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev, +* cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_end +* NAME +* cl_rbmap_end +* +* DESCRIPTION +* The cl_rbmap_end function returns the end of a RB map. +* +* SYNOPSIS +*/ +CL_INLINE const cl_rbmap_item_t* const CL_API +cl_rbmap_end( + IN const cl_rbmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + /* Nil is the end of the map. 
*/ + return( &p_map->nil ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure whose end to return. +* +* RETURN VALUE +* Pointer to the end of the map. +* +* NOTES +* cl_rbmap_end is useful for determining the validity of map items returned +* by cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, or cl_rbmap_prev. If the map +* item pointer returned by any of these functions compares to the end, the +* end of the map was encoutered. +* When using cl_rbmap_head or cl_rbmap_tail, this condition indicates that +* the map is empty. +* +* SEE ALSO +* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev +* cl_rbmap_root, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_left +* NAME +* cl_rbmap_left +* +* DESCRIPTION +* The cl_rbmap_left function returns the map item to the left +* of the specified map item. +* +* SYNOPSIS +*/ +CL_INLINE cl_rbmap_item_t* CL_API +cl_rbmap_left( + IN const cl_rbmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + return( (cl_rbmap_item_t*)p_item->p_left ); +} +/* +* PARAMETERS +* p_item +* [in] Pointer to a map item whose predecessor to return. +* +* RETURN VALUES +* Pointer to the map item to the left in a RB map. +* +* Pointer to the map end if no item is to the left. +* +* SEE ALSO +* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end, +* cl_rbmap_item_t +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_right +* NAME +* cl_rbmap_right +* +* DESCRIPTION +* The cl_rbmap_right function returns the map item to the right +* of the specified map item. +* +* SYNOPSIS +*/ +CL_INLINE cl_rbmap_item_t* CL_API +cl_rbmap_right( + IN const cl_rbmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + return( (cl_rbmap_item_t*)p_item->p_right ); +} +/* +* PARAMETERS +* p_item +* [in] Pointer to a map item whose predecessor to return. +* +* RETURN VALUES +* Pointer to the map item to the right in a RB map. 
+* +* Pointer to the map end if no item is to the right. +* +* SEE ALSO +* RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end, +* cl_rbmap_item_t +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_insert +* NAME +* cl_rbmap_insert +* +* DESCRIPTION +* The cl_rbmap_insert function inserts a map item into a RB map. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_rbmap_insert( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_insert_at, + IN cl_rbmap_item_t* const p_item, + IN boolean_t left ); +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_rbmap_t structure into which to add the item. +* +* p_insert_at +* [in] Pointer to a cl_rbmap_item_t structure to serve as parent +* to p_item. +* +* p_item +* [in] Pointer to a cl_rbmap_item_t stucture to insert into the RB map. +* +* left +* [in] Indicates that p_item should be inserted to the left of p_insert_at. +* +* RETURN VALUE +* Pointer to the item in the map with the specified key. If insertion +* was successful, this is the pointer to the item. If an item with the +* specified key already exists in the map, the pointer to that item is +* returned. +* +* NOTES +* Insertion operations may cause the RB map to rebalance. +* +* SEE ALSO +* RB Map, cl_rbmap_remove, cl_rbmap_item_t +*********/ + + +/****f* Component Library: RB Map/cl_rbmap_remove_item +* NAME +* cl_rbmap_remove_item +* +* DESCRIPTION +* The cl_rbmap_remove_item function removes the specified map item +* from a RB map. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_rbmap_remove_item( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ); +/* +* PARAMETERS +* p_item +* [in] Pointer to a map item to remove from its RB map. +* +* RETURN VALUES +* This function does not return a value. +* +* In a debug build, cl_rbmap_remove_item asserts that the item being removed +* is in the specified map. +* +* NOTES +* Removes the map item pointed to by p_item from its RB map. 
+* +* SEE ALSO +* RB Map, cl_rbmap_remove, cl_rbmap_reset, cl_rbmap_insert +*********/ + + +#ifdef __cplusplus +} +#endif + + +#endif /* _CL_RBMAP_H_ */ diff --git a/trunk/inc/iba/ib_al.h b/trunk/inc/iba/ib_al.h index fa08dc2a..0aa34fbb 100644 --- a/trunk/inc/iba/ib_al.h +++ b/trunk/inc/iba/ib_al.h @@ -67,13 +67,21 @@ typedef struct _al_mad_svc* __ptr64 ib_mad_svc_handle_t; typedef struct _al_query* __ptr64 ib_query_handle_t; typedef struct _al_sub* __ptr64 ib_sub_handle_t; typedef struct _al_listen* __ptr64 ib_listen_handle_t; -typedef struct _al_conn* __ptr64 ib_cm_handle_t; typedef struct _al_ioc* __ptr64 ib_ioc_handle_t; typedef struct _al_svc_entry* __ptr64 ib_svc_handle_t; typedef struct _al_pool_key* __ptr64 ib_pool_key_t; typedef struct _al_pool* __ptr64 ib_pool_handle_t; +typedef struct _ib_cm_handle +{ + ib_al_handle_t h_al; + ib_qp_handle_t h_qp; + net32_t cid; + +} ib_cm_handle_t; + + /****s* Access Layer/ib_shmid_t * NAME * ib_shmid_t @@ -3528,6 +3536,7 @@ typedef struct _ib_apr_info * * SYNOPSIS */ +#pragma warning(disable:4324) typedef struct _ib_cm_req_rec { const void* __ptr64 context; @@ -3550,6 +3559,7 @@ typedef struct _ib_cm_req_rec const void* __ptr64 sidr_context; } ib_cm_req_rec_t; +#pragma warning(default:4324) /* * FIELDS * context @@ -3708,7 +3718,6 @@ typedef struct _ib_cm_rtu_rec { const uint8_t* __ptr64 p_rtu_pdata; - ib_qp_type_t qp_type; ib_qp_handle_t h_qp; const void* __ptr64 qp_context; @@ -3719,9 +3728,6 @@ typedef struct _ib_cm_rtu_rec * A reference to user-defined private data sent as part of the ready * to use message. * -* qp_type -* Indicates the CM service type. -* * h_qp * The queue pair handle associated with the connection request. 
* @@ -3752,9 +3758,6 @@ typedef struct _ib_cm_rej_rec const uint8_t* __ptr64 p_rej_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc & rd qp_type only */ ib_qp_handle_t h_qp; const void* __ptr64 qp_context; @@ -3776,9 +3779,6 @@ typedef struct _ib_cm_rej_rec * A reference to user-defined private data sent as part of the connection * request reply. * -* qp_type -* Indicates the CM service type. -* * h_qp * The queue pair handle associated with a connection request. * @@ -3805,9 +3805,6 @@ typedef struct _ib_cm_mra_rec { const uint8_t* __ptr64 p_mra_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc rd qp_type only */ ib_qp_handle_t h_qp; const void* __ptr64 qp_context; @@ -3817,9 +3814,6 @@ typedef struct _ib_cm_mra_rec * p_mra_pdata * A reference to user-defined private data sent as part of the MRA. * -* qp_type -* Indicates the CM service type. -* * h_qp * The queue pair handle associated with a connection request. * @@ -3848,9 +3842,6 @@ typedef struct _ib_cm_lap_rec const uint8_t* __ptr64 p_lap_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc & rd qp_type only */ const void* __ptr64 qp_context; } ib_cm_lap_rec_t; @@ -3860,9 +3851,6 @@ typedef struct _ib_cm_lap_rec * A reference to user-defined private data sent as part of the load * alternate path request. * -* qp_type -* Indicates the CM service type. -* * qp_context * The queue pair context associated with a connection request. * @@ -3899,9 +3887,6 @@ typedef struct _ib_cm_apr_rec const uint8_t* __ptr64 p_apr_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc & rd qp_type only */ ib_qp_handle_t h_qp; const void* __ptr64 qp_context; @@ -3932,9 +3917,6 @@ typedef struct _ib_cm_apr_rec * A reference to user-defined private data sent as part of the alternate * path response. * -* qp_type -* Indicates the CM service type. -* * h_qp * The queue pair handle associated with the alternate path response. 
* @@ -3963,9 +3945,6 @@ typedef struct _ib_cm_dreq_rec const uint8_t* __ptr64 p_dreq_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc & rd qp_type only */ const void* __ptr64 qp_context; } ib_cm_dreq_rec_t; @@ -3979,9 +3958,6 @@ typedef struct _ib_cm_dreq_rec * A reference to user-defined private data sent as part of the * disconnect request. * -* qp_type -* Indicates the CM service type. -* * qp_context * The queue pair context associated with the disconnect request. * @@ -4006,9 +3982,6 @@ typedef struct _ib_cm_drep_rec const uint8_t* __ptr64 p_drep_pdata; - ib_qp_type_t qp_type; - - /* valid for rc, uc & rd qp_type only */ ib_qp_handle_t h_qp; const void* __ptr64 qp_context; @@ -4024,9 +3997,6 @@ typedef struct _ib_cm_drep_rec * A reference to user-defined private data sent as part of the * disconnect reply. * -* qp_type -* Indicates the CM service type. -* * h_qp * The queue pair handle associated with the disconnect reply. * @@ -4384,17 +4354,13 @@ typedef struct _ib_cm_listen ib_net16_t pkey; uint8_t* __ptr64 p_compare_buffer; - uint32_t compare_offset; - uint32_t compare_length; + uint8_t compare_offset; + uint8_t compare_length; ib_pfn_cm_req_cb_t pfn_cm_req_cb; ib_qp_type_t qp_type; - /* valid for rc, uc & rd qp_type only */ - ib_pfn_cm_mra_cb_t pfn_cm_mra_cb; - ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; - /* valid for ud qp_type only */ const void* __ptr64 sidr_context; @@ -4505,8 +4471,8 @@ typedef struct _ib_cm_req ib_qp_handle_t h_qp; uint8_t* __ptr64 p_compare_buffer; - uint32_t compare_offset; - uint32_t compare_length; + uint8_t compare_offset; + uint8_t compare_length; uint8_t resp_res; uint8_t init_depth; @@ -4706,6 +4672,8 @@ typedef struct _ib_cm_rep uint8_t rnr_nak_timeout; uint8_t rnr_retry_cnt; + ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; + ib_pfn_cm_mra_cb_t pfn_cm_mra_cb; ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb; ib_pfn_cm_lap_cb_t pfn_cm_lap_cb; ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb; @@ -5974,6 +5942,70 @@ ib_cm_handoff( *****/ +typedef struct _ib_cep +{ + void 
*context; + net32_t cid; + +} ib_cep_t; + + +/****s* Access Layer/ib_cep_listen_t +* NAME +* ib_cep_listen_t +* +* DESCRIPTION +* Request to listen for incoming connection attempts. +* +* SYNOPSIS +*/ +typedef struct _ib_cep_listen +{ + net64_t svc_id; + + net64_t port_guid; + + uint8_t* __ptr64 p_cmp_buf; + uint8_t cmp_len; + uint8_t cmp_offset; + +} ib_cep_listen_t; +/* +* FIELDS +* svc_id +* The identifier of the service to register for incoming connection +* requests. +* +* port_guid +* Directs the communication manager to register the listen only +* with the specified port. This should be set to IB_ALL_PORTS +* if the listen is not directed to a particular port. +* +* p_cmp_buf +* An optionally provided buffer that will be used to match incoming +* connection requests with a registered service. Use of this buffer +* permits multiple services to listen on the same service ID as long as +* they provide different compare buffers. Incoming requests will +* be matched against the compare buffer. +* +* cmp_len +* Specifies the size of the compare buffer in bytes. The length must +* be the same for all requests using the same service ID. +* +* cmp_offset +* An offset into the user-defined data area of a connection request +* which contains the start of the data that will be compared against. +* The offset must be the same for all requests using the same service ID. +* +* NOTES +* Users fill out this structure when listening on a service ID with the +* local communication manager. The communication manager will use the given +* service ID and compare buffer to route connection requests to the +* appropriate client. Users may direct listens requests on a particular +* channel adapter, port, or LID. 
+*****/ + + /****f* Access Layer/ib_create_ioc * NAME * ib_create_ioc diff --git a/trunk/inc/iba/ib_al_ioctl.h b/trunk/inc/iba/ib_al_ioctl.h index 62f7e03f..b3a9e907 100644 --- a/trunk/inc/iba/ib_al_ioctl.h +++ b/trunk/inc/iba/ib_al_ioctl.h @@ -2228,85 +2228,58 @@ typedef union _ual_mad_recv_ioctl -/****s* User-mode Access Layer/ual_cm_listen_ioctl_t +/****s* User-mode Access Layer/ual_create_cep_ioctl_t * NAME -* ual_cm_listen_ioctl_t +* ual_create_cep_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters to -* perform a CM listen request. +* IOCTL structure containing the output parameters to +* create a CEP. * * SYNOPSIS */ -typedef union _ual_cm_listen_ioctl +typedef struct _ual_create_cep_ioctl { - struct _ual_cm_listen_ioctl_in - { - void* __ptr64 context; - ib_cm_listen_t cm_listen; - /* Compare data buffer follows IOCTL buffer immediately. */ + ib_api_status_t status; + net32_t cid; - } in; - struct _ual_cm_listen_ioctl_out - { - ib_api_status_t status; - uint64_t h_cm_listen; - - } out; - -} ual_cm_listen_ioctl_t; +} ual_create_cep_ioctl_t; /* * FIELDS -* in.listen_context -* User-specified context information that is returned as a part of all -* connection requests through the pfn_cm_req_cb routine. The context is -* also returned through the error and destroy callbacks. -* -* in.cm_listen -* Information used to direct the listen request to match incoming -* connection requests. -* -* out.status +* status * Status of the operation. * -* out.h_cm_listen -* Upon successful completion of this call, handle to the listen request. -* This handle may be used to cancel the listen operation. +* cid +* CID of the created CEP. 
*****/ -/****s* User-mode Access Layer/ual_cm_cancel_ioctl_t +/****s* User-mode Access Layer/ual_cep_listen_ioctl_t * NAME -* ual_cm_cancel_ioctl_t +* ual_cep_listen_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for -* ib_cm_cancel +* IOCTL structure containing the input parameters to +* perform a CM listen request. * * SYNOPSIS */ -typedef union _ual_cm_cancel_ioctl +typedef struct _ual_cep_listen_ioctl { - struct _ual_cm_cancel_ioctl_in - { - uint64_t h_cm_listen; + net32_t cid; + ib_cep_listen_t cep_listen; + uint8_t compare[IB_REQ_PDATA_SIZE]; - } in; - struct _ual_cm_cancel_ioctl_out - { - ib_api_status_t status; - - } out; - -} ual_cm_cancel_ioctl_t; +} ual_cep_listen_ioctl_t; /* * FIELDS -* in.h_cm_listen -* The listen handle that needs to be cancelled. +* in.cid +* CID of an existing CEP. * -* out.status -* Status of the operation. +* in.cep_listen +* Information used to direct the listen request to match incoming +* connection requests. *****/ @@ -2317,31 +2290,34 @@ typedef union _ual_cm_cancel_ioctl * * DESCRIPTION * IOCTL structure containing the input and output parameters for -* ib_cm_req +* al_cep_pre_req call. * * SYNOPSIS */ -typedef union _ual_cm_req_ioctl +typedef union _ual_cep_req_ioctl { - struct _ual_cm_req_ioctl_in + struct _ual_cep_req_ioctl_in { - uint64_t h_qp; /* for CM */ + net32_t cid; ib_cm_req_t cm_req; - ib_path_rec_t paths[1]; - /* If an alternate path is specified, it follows the primary path. */ - /* private data follows the IOCTL buffer immediately. */ - /* compare data follows private data immediately. */ + ib_path_rec_t paths[2]; + uint8_t pdata[IB_REQ_PDATA_SIZE]; + uint8_t compare[IB_REQ_PDATA_SIZE]; } in; - struct _ual_cm_req_ioctl_out + struct _ual_cep_req_ioctl_out { ib_api_status_t status; + ib_qp_mod_t init; } out; -} ual_cm_req_ioctl_t; +} ual_cep_req_ioctl_t; /* * FIELDS +* in.cid +* CID of the target CEP. +* * in.cm_req * CM REQ parameters. 
* @@ -2350,37 +2326,40 @@ typedef union _ual_cm_req_ioctl * * out.status * Status of the operation +* +* out.init +* QP modify paramters for INIT state transition. *****/ -/****s* User-mode Access Layer/ual_cm_rep_ioctl_t +/****s* User-mode Access Layer/ual_cep_rep_ioctl_t * NAME -* ual_cm_rep_ioctl_t +* ual_cep_rep_ioctl_t * * DESCRIPTION * IOCTL structure containing the input and output parameters for -* ib_cm_rep +* al_cep_pre_rep call. * * SYNOPSIS */ -typedef union _ual_cm_rep_ioctl +typedef union _ual_cep_rep_ioctl { - struct _ual_cm_rep_ioctl_in + struct _ual_cep_rep_ioctl_in { - uint64_t h_cm_req; - uint64_t h_qp; + net32_t cid; ib_cm_rep_t cm_rep; - /* Private data follows immediately. */ + uint8_t pdata[IB_REP_PDATA_SIZE]; } in; - struct _ual_cm_rep_ioctl_out + struct _ual_cep_rep_ioctl_out { ib_api_status_t status; + ib_qp_mod_t init; } out; -} ual_cm_rep_ioctl_t; +} ual_cep_rep_ioctl_t; /* * FIELDS * in.h_cm_req @@ -2391,93 +2370,145 @@ typedef union _ual_cm_rep_ioctl * * out.status * Status of the operation. +* +* out.init +* QP modify paramters for INIT state transition. *****/ -/****s* User-mode Access Layer/ual_cm_rtu_ioctl_t +/****s* User-mode Access Layer/ual_cep_get_rtr_ioctl_t * NAME -* ual_cm_rtu_ioctl_t +* ual_cep_get_rtr_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for -* ib_cm_rtu +* IOCTL structure containing the output parameters for +* al_cep_get_rtr_attr call. * * SYNOPSIS */ -typedef union _ual_cm_rtu_ioctl +typedef struct _ual_cep_get_rtr_ioctl { - struct _ual_cm_rtu_ioctl_in - { - uint64_t h_cm_rep; - ib_cm_rtu_t cm_rtu; - /* Private data follows IOCTL buffer. */ + ib_api_status_t status; + ib_qp_mod_t rtr; - } in; - struct _ual_cm_rtu_ioctl_out - { - ib_api_status_t status; +} ual_cep_get_rtr_ioctl_t; +/* +* FIELDS +* out.status +* Status of the operation. +* +* out.rtr +* QP modify paramters for RTR state transition. 
+*****/ - } out; -} ual_cm_rtu_ioctl_t; + +/****s* User-mode Access Layer/ual_cep_get_rts_ioctl_t +* NAME +* ual_cep_get_rts_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the output parameters for +* al_cep_get_rts_attr call. +* +* SYNOPSIS +*/ +typedef struct _ual_cep_get_rts_ioctl +{ + ib_api_status_t status; + ib_qp_mod_t rts; + +} ual_cep_get_rts_ioctl_t; /* * FIELDS -* in.h_cm_rep +* out.status +* Status of the operation. +* +* out.rts +* QP modify paramters for RTS state transition. +*****/ + + + +/****s* User-mode Access Layer/ual_cep_rtu_ioctl_t +* NAME +* ual_cep_rtu_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input parameters for +* al_cep_rtu call. +* +* SYNOPSIS +*/ +typedef struct _ual_cep_rtu_ioctl +{ + net32_t cid; + uint8_t pdata_len; + uint8_t pdata[IB_RTU_PDATA_SIZE]; + +} ual_cep_rtu_ioctl_t; +/* +* FIELDS +* in.cid * The cm_rep connection handle got on the callback. * -* in.cm_rtu -* CM RTU parameters. +* in.pdata_len +* Length of private data. * -* out.status -* Status of the operation +* in.pdata +* Private data. *****/ -/****s* User-mode Access Layer/ual_cm_rej_ioctl_t +/****s* User-mode Access Layer/ual_cep_rej_ioctl_t * NAME -* ual_cm_rej_ioctl_t +* ual_cep_rej_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for -* ib_cm_rej +* IOCTL structure containing the input parameters for +* al_cep_rej * * SYNOPSIS */ -typedef union _ual_cm_rej_ioctl +typedef struct _ual_cep_rej_ioctl { - struct _ual_cm_rej_ioctl_in - { - uint64_t h_cm; - ib_cm_rej_t cm_rej; - /* ARI and private data data follow IOCTL buffer immediately. */ + net32_t cid; - } in; - struct _ual_cm_rej_ioctl_out - { - ib_api_status_t status; - - } out; + ib_rej_status_t rej_status; + uint8_t ari_len; + uint8_t pdata_len; + uint8_t ari[IB_ARI_SIZE]; + uint8_t pdata[IB_REJ_PDATA_SIZE]; -} ual_cm_rej_ioctl_t; +} ual_cep_rej_ioctl_t; /* * FIELDS -* in.h_cm -* The connection handle got on the callback. 
+* in.cid +* The CID of the target CEP. * -* in.cm_rej -* CM REJ parameters. +* in.rej_status +* Rejection status as defined in IB spec. * -* out.status -* Status of the operation. +* in.ari_len +* Length of the ARI data. +* +* in.pdata_len +* Length of the private data. +* +* in.ari +* ARI data. +* +* in.pdata +* Private data. *****/ -/****s* User-mode Access Layer/ual_cm_handoff_ioctl_t +/****s* User-mode Access Layer/ual_cep_handoff_ioctl_t * NAME -* ual_cm_handoff_ioctl_t +* ual_cep_handoff_ioctl_t * * DESCRIPTION * IOCTL structure containing the input and output parameters for @@ -2485,21 +2516,21 @@ typedef union _ual_cm_rej_ioctl * * SYNOPSIS */ -typedef union _ual_cm_handoff_ioctl +typedef union _ual_cep_handoff_ioctl { - struct _ual_cm_handoff_ioctl_in + struct _ual_cep_handoff_ioctl_in { uint64_t h_cm; net64_t sid; } in; - struct _ual_cm_handoff_ioctl_out + struct _ual_cep_handoff_ioctl_out { ib_api_status_t status; } out; -} ual_cm_handoff_ioctl_t; +} ual_cep_handoff_ioctl_t; /* * FIELDS * in.h_cm @@ -2514,73 +2545,52 @@ typedef union _ual_cm_handoff_ioctl -/****s* User-mode Access Layer/ual_cm_mra_ioctl_t +/****s* User-mode Access Layer/ual_cep_mra_ioctl_t * NAME -* ual_cm_mra_ioctl_t +* ual_cep_mra_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for +* IOCTL structure containing the input parameters for * ib_cm_mra * * SYNOPSIS */ -typedef union _ual_cm_mra_ioctl +typedef struct _ual_cep_mra_ioctl { - struct _ual_cm_mra_ioctl_in - { - uint64_t h_cm; - ib_cm_mra_t cm_mra; - /* Private data follows IOCTL buffer immediately. */ + net32_t cid; + ib_cm_mra_t cm_mra; + uint8_t pdata[IB_MRA_PDATA_SIZE]; - } in; - struct _ual_cm_mra_ioctl_out - { - ib_api_status_t status; - - } out; - -} ual_cm_mra_ioctl_t; +} ual_cep_mra_ioctl_t; /* * FIELDS -* in.h_cm -* The connection handle got on the callback. +* in.cid +* The CID for the target CEP. * -* cm_mra +* in.cm_mra * CM MRA parameters. 
-* -* out.status -* Status of the operation *****/ -/****s* User-mode Access Layer/ual_cm_lap_ioctl_t +/****s* User-mode Access Layer/ual_cep_lap_ioctl_t * NAME -* ual_cm_lap_ioctl_t +* ual_cep_lap_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for +* IOCTL structure containing the input parameters for * ib_cm_lap * * SYNOPSIS */ -typedef union _ual_cm_lap_ioctl +typedef struct _ual_cep_lap_ioctl { - struct _ual_cm_lap_ioctl_in - { - uint64_t h_qp; - ib_cm_lap_t cm_lap; - ib_path_rec_t alt_path; - /* Private data follows IOCTL buffer immediately */ + net32_t cid; + ib_cm_lap_t cm_lap; + ib_path_rec_t alt_path; + uint8_t pdata[IB_LAP_PDATA_SIZE]; - } in; - struct _ual_cm_lap_ioctl_out - { - ib_api_status_t status; - - } out; - -} ual_cm_lap_ioctl_t; +} ual_cep_lap_ioctl_t; /* * FIELDS * in.cm_lap @@ -2588,40 +2598,39 @@ typedef union _ual_cm_lap_ioctl * * in.alt_path * Alternate path information. -* -* out.status -* Status of the operation *****/ -/****s* User-mode Access Layer/ual_cm_apr_ioctl_t +/****s* User-mode Access Layer/ual_cep_apr_ioctl_t * NAME -* ual_cm_apr_ioctl_t +* ual_cep_apr_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for -* ib_cm_apr +* IOCTL structure containing the input parameters for +* ib_cep_apr * * SYNOPSIS */ -typedef union _ual_cm_apr_ioctl +typedef union _ual_cep_apr_ioctl { - struct _ual_cm_apr_ioctl_in + struct _ual_cep_apr_ioctl_in { - uint64_t h_cm_lap; - uint64_t h_qp; - ib_cm_apr_t cm_apr; - /* Info and Private data follow IOCTL buffer immediately */ + net32_t cid; + ib_cm_apr_t cm_apr; + uint8_t apr_info[IB_APR_INFO_SIZE]; + uint8_t pdata[IB_APR_PDATA_SIZE]; } in; - struct _ual_cm_apr_ioctl_out + + struct _ual_cep_apr_ioctl_out { - ib_api_status_t status; + ib_api_status_t status; + ib_qp_mod_t apr; } out; -} ual_cm_apr_ioctl_t; +} ual_cep_apr_ioctl_t; /* * FIELDS * in.h_cm_lap @@ -2629,9 +2638,6 @@ typedef union _ual_cm_apr_ioctl * * in.cm_apr * CM APR 
parameters. -* -* out.status -* Status of the operation. *****/ @@ -2671,46 +2677,34 @@ typedef union _ual_force_apm_ioctl -/****s* User-mode Access Layer/ual_cm_dreq_ioctl_t +/****s* User-mode Access Layer/ual_cep_dreq_ioctl_t * NAME -* ual_cm_dreq_ioctl_t +* ual_cep_dreq_ioctl_t * * DESCRIPTION -* IOCTL structure containing the input and output parameters for +* IOCTL structure containing the input parameters for * ib_cm_dreq * * SYNOPSIS */ -typedef union _ual_cm_dreq_ioctl +typedef struct _ual_cep_dreq_ioctl { - struct _ual_cm_dreq_ioctl_in - { - uint64_t h_qp; - ib_cm_dreq_t cm_dreq; - /* Private data follows IOCTL buffer immediately */ - - } in; - struct _ual_cm_dreq_ioctl_out - { - ib_api_status_t status; - - } out; + net32_t cid; + uint8_t pdata_len; + uint8_t pdata[IB_DREQ_PDATA_SIZE]; -} ual_cm_dreq_ioctl_t; +} ual_cep_dreq_ioctl_t; /* * FIELDS * cm_dreq * CM DREQ parameters. -* -* out.status -* Status of the operation. *****/ -/****s* User-mode Access Layer/ual_cm_drep_ioctl_t +/****s* User-mode Access Layer/ual_cep_drep_ioctl_t * NAME -* ual_cm_drep_ioctl_t +* ual_cep_drep_ioctl_t * * DESCRIPTION * IOCTL structure containing the input and output parameters for @@ -2718,22 +2712,13 @@ typedef union _ual_cm_dreq_ioctl * * SYNOPSIS */ -typedef union _ual_cm_drep_ioctl +typedef struct _ual_cep_drep_ioctl { - struct _ual_cm_drep_ioctl_in - { - uint64_t h_cm_dreq; - ib_cm_drep_t cm_drep; - /* Private data follows IOCTL buffer immediately. */ + net32_t cid; + ib_cm_drep_t cm_drep; + uint8_t pdata[IB_DREP_PDATA_SIZE]; - } in; - struct _ual_cm_drep_ioctl_out - { - ib_api_status_t status; - - } out; - -} ual_cm_drep_ioctl_t; +} ual_cep_drep_ioctl_t; /* * FIELDS * in.h_cm_dreq @@ -2741,9 +2726,67 @@ typedef union _ual_cm_drep_ioctl * * in.cm_drep * CM DREP parameters. 
+*****/ + + + +/****s* User-mode Access Layer/ual_cep_get_timewait_ioctl_t +* NAME +* ual_cep_get_timewait_ioctl_t * -* out.status +* DESCRIPTION +* IOCTL structure containing the output parameters for +* ib_cep_get_timewait +* +* SYNOPSIS +*/ +typedef struct _ual_cep_get_timewait_ioctl +{ + ib_api_status_t status; + uint64_t timewait_us; + +} ual_cep_get_timewait_ioctl_t; +/* +* FIELDS +* in.status +* Status of the request. +* +* in.timewait_us +* Timewait value, in microseconds. +*****/ + + + +/****s* User-mode Access Layer/ual_cep_poll_ioctl_t +* NAME +* ual_cep_poll_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the output parameters to +* poll for incoming events on a CEP. The input parameter is the CID. +* +* SYNOPSIS +*/ +typedef struct _ual_cep_poll_ioctl +{ + ib_api_status_t status; + ib_cep_t new_cep; + ib_mad_element_t element; + ib_grh_t grh; + uint8_t mad_buf[MAD_BLOCK_SIZE]; + +} ual_cep_poll_ioctl_t; +/* +* FIELDS +* status * Status of the operation. +* +* new_cep +* For listen requests, CEP information of CEPs created in response +* to incoming REQs. 
+* +* mad_buf +* Payload of a received MAD (or failed send) *****/ diff --git a/trunk/inc/iba/ib_types.h b/trunk/inc/iba/ib_types.h index 022e78a5..12c7f653 100644 --- a/trunk/inc/iba/ib_types.h +++ b/trunk/inc/iba/ib_types.h @@ -6862,6 +6862,7 @@ ib_dm_set_slot_lo_hi( /* * Information describing an I/O controller */ +#pragma warning(disable:4324) typedef struct _ib_ioc_info { net64_t chassis_guid; @@ -6871,6 +6872,8 @@ typedef struct _ib_ioc_info ib_ioc_profile_t profile; } ib_ioc_info_t; +#pragma warning(default:4324) + /* * Defines known Communication management class versions @@ -8197,7 +8200,6 @@ typedef struct _ib_qp_mod { struct _qp_init { - ib_qp_opts_t opts; uint8_t primary_port; ib_net32_t qkey; uint16_t pkey_index; diff --git a/trunk/inc/ics_ver.h b/trunk/inc/ics_ver.h index d925eb3b..759ddd42 100644 --- a/trunk/inc/ics_ver.h +++ b/trunk/inc/ics_ver.h @@ -42,7 +42,7 @@ #endif #ifndef VER_FILEBUILD -#define VER_FILEBUILD 31 +#define VER_FILEBUILD 32 #endif #ifndef VER_FILEREV diff --git a/trunk/inc/kernel/complib/cl_timer_osd.h b/trunk/inc/kernel/complib/cl_timer_osd.h index 2b3f702c..c7c51151 100644 --- a/trunk/inc/kernel/complib/cl_timer_osd.h +++ b/trunk/inc/kernel/complib/cl_timer_osd.h @@ -58,9 +58,9 @@ extern "C" #endif /* 100ns to s conversion */ -#define HUNDREDNS_TO_SEC 10000000 +#define HUNDREDNS_TO_SEC 10000000ULL /* s to µs conversion */ -#define SEC_TO_MICRO 1000000 +#define SEC_TO_MICRO 1000000ULL CL_INLINE uint64_t CL_API cl_get_time_stamp( void ) diff --git a/trunk/inc/user/complib/cl_debug_osd.h b/trunk/inc/user/complib/cl_debug_osd.h index 4756f16f..3bf6208d 100644 --- a/trunk/inc/user/complib/cl_debug_osd.h +++ b/trunk/inc/user/complib/cl_debug_osd.h @@ -30,8 +30,6 @@ */ - - #ifndef _CL_DEBUG_OSD_H_ #define _CL_DEBUG_OSD_H_ @@ -72,16 +70,16 @@ * CL_TRACE_EXIT, and CL_EXIT macros. 
*/ #define _CL_DBG_ENTER \ - ("%s%s%s() [\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + ("0x%x:%s%s%s() [\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) #define _CL_DBG_EXIT \ - ("%s%s%s() ]\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + ("0x%x:%s%s%s() ]\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) #define _CL_DBG_INFO \ - ("%s%s%s(): ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + ("0x%x:%s%s%s(): ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) #define _CL_DBG_ERROR \ - ("%s%s%s() !ERROR!: ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + ("0x%x:%s%s%s() !ERROR!: ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) #define CL_CHK_STK diff --git a/trunk/tests/alts/cmtests.c b/trunk/tests/alts/cmtests.c index 9396328e..235bdb0a 100644 --- a/trunk/tests/alts/cmtests.c +++ b/trunk/tests/alts/cmtests.c @@ -57,6 +57,7 @@ #define DEST_QP 1 +#pragma warning(disable:4324) typedef struct _alts_cm_ca_obj { ib_api_status_t status; @@ -98,7 +99,6 @@ typedef struct _alts_cm_ca_obj ib_qp_attr_t qp_attr[MAX_QPS]; - ib_send_wr_t *p_send_wr; ib_recv_wr_t *p_recv_wr; size_t wr_send_size; @@ -154,6 +154,7 @@ typedef struct _alts_cm_ca_obj mem_region_t mem_region[10]; } alts_cm_ca_obj_t; +#pragma warning(default:4324) #define MAX_SERVER 500 @@ -1153,47 +1154,49 @@ rdma_cq_comp_cb( p_done_cl->wr_id, ib_get_wc_status_str(p_done_cl->status) )); - - if (p_done_cl->wc_type == IB_WC_RECV) + if( p_done_cl->status == IB_WCS_SUCCESS ) { - ALTS_PRINT(ALTS_DBG_VERBOSE, - ("message length..:%d bytes\n", - p_done_cl->length )); + if (p_done_cl->wc_type == IB_WC_RECV) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("message length..:%d bytes\n", + p_done_cl->length )); - id = (uint32_t)p_done_cl->wr_id; + id = (uint32_t)p_done_cl->wr_id; - ALTS_PRINT(ALTS_DBG_VERBOSE, - ("RecvRC info:\n" - "\trecv_opt...:x%x\n" - "\timm_data...:x%x\n", - p_done_cl->recv.conn.recv_opt, - p_done_cl->recv.ud.immediate_data )); + 
ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); - if( p_ca_obj->rdma_enabled == TRUE ) + if( p_ca_obj->rdma_enabled == TRUE ) + { + process_response( p_ca_obj, + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, + (uint32_t)p_done_cl->wr_id ); + } + } + else + if (p_done_cl->wc_type == IB_WC_RDMA_WRITE) { + // convert request to read now + p_data = + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer; + p_data->msg_type = 'R'; process_response( p_ca_obj, - (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, - (uint32_t)p_done_cl->wr_id ); + p_data, + (uint32_t)p_done_cl->wr_id ); + } + else + if (p_done_cl->wc_type == IB_WC_RDMA_READ) + { + id = (uint32_t)p_done_cl->wr_id; + process_response( p_ca_obj, + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, + (uint32_t)p_done_cl->wr_id ); } - } - else - if (p_done_cl->wc_type == IB_WC_RDMA_WRITE) - { - // convert request to read now - p_data = - (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer; - p_data->msg_type = 'R'; - process_response( p_ca_obj, - p_data, - (uint32_t)p_done_cl->wr_id ); - } - else - if (p_done_cl->wc_type == IB_WC_RDMA_READ) - { - id = (uint32_t)p_done_cl->wr_id; - process_response( p_ca_obj, - (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, - (uint32_t)p_done_cl->wr_id ); } p_free_wcl = p_done_cl; @@ -1893,12 +1896,18 @@ alts_cm_rep_cb( p_cm_rtu->access_ctrl |= IB_AC_RDMA_READ + IB_AC_RDMA_WRITE; } - p_cm_rtu->sq_depth = 16; - p_cm_rtu->rq_depth = 16; + if( p_ca_obj->p_ca_attr->modify_wr_depth ) + { + p_cm_rtu->sq_depth = 16; + p_cm_rtu->rq_depth = 16; + } p_cm_rtu->pfn_cm_apr_cb = alts_cm_apr_cb; p_cm_rtu->pfn_cm_dreq_cb = alts_cm_dreq_cb; ib_status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, p_cm_rtu ); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_rtu returned %s\n", ib_get_err_str( ib_status )) ); } else if ( 
p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM ) @@ -2105,6 +2114,8 @@ alts_cm_req_cb( p_cm_rep->flow_ctrl = TRUE; p_cm_rep->rnr_nak_timeout = 7; p_cm_rep->rnr_retry_cnt = 7; + p_cm_rep->pfn_cm_rej_cb = alts_cm_rej_cb; + p_cm_rep->pfn_cm_mra_cb = alts_cm_mra_cb; p_cm_rep->pfn_cm_rtu_cb = alts_cm_rtu_cb; p_cm_rep->pfn_cm_lap_cb = alts_cm_lap_cb; p_cm_rep->pfn_cm_dreq_cb = alts_cm_dreq_cb; @@ -2117,10 +2128,14 @@ alts_cm_req_cb( cm_mra.svc_timeout = 21; // equals 8.5 sec wait + packet lifetime ib_status = ib_cm_mra( p_cm_req_rec->h_cm_req, &cm_mra ); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_mra returned %s\n", ib_get_err_str( ib_status )) ); } else { ib_status = ib_cm_rep( p_cm_req_rec->h_cm_req, p_cm_rep ); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_rep returned %s\n", ib_get_err_str( ib_status )) ); } } else @@ -2193,7 +2208,6 @@ alts_cm_client_server( p_listen->lid = p_ca_obj->dlid; p_listen->pkey = p_ca_obj->p_dest_port_attr->p_pkey_table[0]; p_listen->pfn_cm_req_cb = alts_cm_req_cb; - p_listen->pfn_cm_rej_cb = alts_cm_rej_cb; ib_status = ib_cm_listen(h_al, p_listen, alts_cm_err_cb, p_ca_obj, &p_ca_obj->h_cm_listen ); @@ -2267,6 +2281,7 @@ alts_cm_client_server( p_req_client->pfn_cm_rej_cb = alts_cm_rej_cb; p_req_client->pfn_cm_mra_cb = alts_cm_mra_cb; p_req_client->h_qp = p_ca_obj->h_qp[SRC_QP]; + p_req_client->local_resp_timeout = 12; } ib_status = ib_cm_req(p_req_client); diff --git a/trunk/tests/alts/ibquery.c b/trunk/tests/alts/ibquery.c index d70df31c..396186ea 100644 --- a/trunk/tests/alts/ibquery.c +++ b/trunk/tests/alts/ibquery.c @@ -113,7 +113,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("alts_open_al failed status = %s\n", ib_get_err_str(ib_status)) ); + ("alts_open_al failed status = %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -124,7 +125,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("alts_open_ca failed status = %s\n", ib_get_err_str(ib_status)) ); + 
("alts_open_ca failed status = %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -136,7 +138,8 @@ al_test_query(void) if(ib_status != IB_INSUFFICIENT_MEMORY) { ALTS_PRINT(ALTS_DBG_ERROR, - ("ib_query_ca failed with status = %s\n", ib_get_err_str(ib_status)) ); + ("ib_query_ca failed with status = %s\n", + ib_get_err_str(ib_status)) ); ib_status = IB_ERROR; break; } @@ -158,7 +161,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query_ca failed with status = %s\n", ib_get_err_str(ib_status)) ); + ("ib_query_ca failed with status = %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -206,7 +210,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -231,7 +236,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -256,7 +262,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -281,7 +288,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -314,7 +322,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_query api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -349,7 +358,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - 
("ib_reg_svc api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_reg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -387,7 +397,8 @@ al_test_query(void) if(ib_status != IB_SUCCESS) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_reg_svc api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_reg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } @@ -395,7 +406,8 @@ al_test_query(void) if( ib_status != IB_SUCCESS ) { ALTS_PRINT( ALTS_DBG_ERROR, - ("ib_dereg_svc api failed with status %s\n",ib_get_err_str(ib_status)) ); + ("ib_dereg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); break; } #endif diff --git a/trunk/tests/cmtest/user/main.c b/trunk/tests/cmtest/user/main.c index 9b51efa8..e487f8f4 100644 --- a/trunk/tests/cmtest/user/main.c +++ b/trunk/tests/cmtest/user/main.c @@ -77,13 +77,13 @@ typedef struct _ib_root ib_pd_handle_t h_pd; /* Input parameters to control test. */ - uint32_t num_nodes; + int32_t num_nodes; uint32_t num_msgs; boolean_t per_msg_buf; cl_mutex_t mutex; cmtest_state_t state; - uint32_t num_connected; + atomic32_t num_connected; uint32_t conn_index; /* current connection id */ uint32_t total_sent; uint32_t total_recv; @@ -532,6 +532,8 @@ __init_conn_info() g_root.cm_rep.flow_ctrl = TRUE; g_root.cm_rep.rnr_nak_timeout = 7; g_root.cm_rep.rnr_retry_cnt = 6; + g_root.cm_rep.pfn_cm_rej_cb = __rej_cb; + g_root.cm_rep.pfn_cm_mra_cb = __mra_cb; g_root.cm_rep.pfn_cm_rtu_cb = __rtu_cb; g_root.cm_rep.pfn_cm_lap_cb = __lap_cb; g_root.cm_rep.pfn_cm_dreq_cb = __dreq_cb; @@ -698,6 +700,12 @@ __rej_cb( { CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + /* + * Note - because this callback exits the app, any output beyond the + * the first time may report junk. There have been instances where + * the callback is invoked more times than there are connection requests + * but that behavior disapeared if the call to exit below is removed. 
+ */ printf( "Connection was rejected, status: 0x%x\n", p_cm_rej_rec->rej_status ); @@ -783,11 +791,11 @@ __rep_cb( status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &g_root.cm_rtu ); if( status != IB_SUCCESS ) { - printf( "Call to ib_cm_rtu failed\n" ); + printf( "Call to ib_cm_rtu returned %s\n", ib_get_err_str( status ) ); exit( 1 ); } - g_root.num_connected++; + cl_atomic_inc( &g_root.num_connected ); CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); } @@ -807,7 +815,7 @@ __rtu_cb( p_node->state = node_conn; __post_recvs( p_node ); - g_root.num_connected++; + cl_atomic_inc( &g_root.num_connected ); CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); } @@ -857,6 +865,7 @@ __dreq_cb( CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); CL_ASSERT( p_cm_dreq_rec ); p_node = (ib_node_t*)p_cm_dreq_rec->qp_context; + CL_ASSERT( p_node ); /* * Record that we've already received a DREQ to avoid trying to @@ -875,7 +884,7 @@ __dreq_cb( if( status == IB_SUCCESS ) { p_node->state = node_idle; - g_root.num_connected--; + cl_atomic_dec( &g_root.num_connected ); } } else @@ -900,10 +909,13 @@ __drep_cb( CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); CL_ASSERT( p_cm_drep_rec ); p_node = (ib_node_t*)p_cm_drep_rec->qp_context; + CL_ASSERT( p_node ); /* We're done with this connection. 
*/ + cl_mutex_acquire( &g_root.mutex ); p_node->state = node_idle; - g_root.num_connected--; + cl_atomic_dec( &g_root.num_connected ); + cl_mutex_release( &g_root.mutex ); CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); } @@ -988,7 +1000,7 @@ static ib_api_status_t __create_qps() { uint64_t start_time, total_time; - uint32_t i; + int32_t i; ib_api_status_t status; printf( "Creating QPs...\n" ); @@ -1017,7 +1029,7 @@ static void __destroy_qps() { uint64_t start_time, total_time; - uint32_t i; + int32_t i; printf( "Destroying QPs...\n" ); start_time = cl_get_time_stamp(); @@ -1107,7 +1119,7 @@ __destroy_node( return (FALSE); if ( p_node->h_send_cq ) { - status = ib_destroy_cq( p_node->h_send_cq, NULL ); + status = ib_destroy_cq( p_node->h_send_cq, ib_sync_destroy ); p_node->h_send_cq = NULL; if( status != IB_SUCCESS ) { @@ -1117,7 +1129,7 @@ __destroy_node( } if (p_node->h_recv_cq) { - status = ib_destroy_cq( p_node->h_recv_cq, NULL ); + status = ib_destroy_cq( p_node->h_recv_cq, ib_sync_destroy ); p_node->h_recv_cq = NULL; if( status != IB_SUCCESS ) { @@ -1135,7 +1147,7 @@ __destroy_node( static boolean_t __create_nodes() { - uint32_t i; + int32_t i; CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); for( i = 0; i < g_root.num_nodes; i++ ) @@ -1156,7 +1168,7 @@ __create_nodes() static boolean_t __destroy_nodes() { - uint32_t i; + int32_t i; CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); @@ -1449,12 +1461,6 @@ __init_root() return FALSE; } - if( !__create_nodes() ) - { - printf( "Unable to create nodes.\n" ); - return FALSE; - } - CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); return TRUE; } @@ -1504,9 +1510,6 @@ __listen() cm_listen.qp_type = IB_QPT_RELIABLE_CONN; - cm_listen.pfn_cm_mra_cb = __mra_cb; - cm_listen.pfn_cm_rej_cb = __rej_cb; - status = ib_cm_listen( g_root.h_al, &cm_listen, __cm_listen_err_cb, &g_root, &g_root.h_listen ); if( status != IB_SUCCESS ) @@ -1526,7 +1529,7 @@ static ib_api_status_t __conn_reqs() { ib_api_status_t status; - uintn_t i; + int32_t i; uint8_t 
pdata[IB_REQ_PDATA_SIZE]; g_root.cm_req.p_req_pdata = pdata; @@ -1572,7 +1575,6 @@ __conn_reps() printf( "ib_cm_rep failed [%s]!\n", ib_get_err_str(status) ); return status; } - g_root.p_nodes[i].h_cm_req = NULL; } return IB_SUCCESS; } @@ -1638,7 +1640,7 @@ static void __disconnect() { ib_api_status_t status; - uint32_t i; + int32_t i; ib_node_t *p_node; uint64_t total_time, start_time; @@ -1665,28 +1667,27 @@ __disconnect() { case node_conn: g_root.cm_dreq.h_qp = p_node->h_qp; - ib_cm_dreq( &g_root.cm_dreq ); - cl_mutex_release( &g_root.mutex ); + status = ib_cm_dreq( &g_root.cm_dreq ); + if( status == IB_SUCCESS ) + p_node->state = node_dreq_sent; break; case node_dreq_rcvd: - cl_mutex_release( &g_root.mutex ); status = ib_cm_drep( p_node->h_cm_dreq, &g_root.cm_drep ); - p_node->h_cm_dreq = NULL; /* If the DREP was successful, we're done with this connection. */ if( status == IB_SUCCESS ) { p_node->state = node_idle; - g_root.num_connected--; + cl_atomic_dec( &g_root.num_connected ); } break; default: /* Node is already disconnected. */ - cl_mutex_release( &g_root.mutex ); break; } + cl_mutex_release( &g_root.mutex ); } /* Wait for all disconnections to complete. */ @@ -1714,7 +1715,8 @@ static boolean_t __send_msgs() { ib_api_status_t status; - uint32_t i, m; + int32_t i; + uint32_t m; ib_send_wr_t send_wr; ib_send_wr_t *p_send_failure; ib_local_ds_t ds_array; @@ -1785,7 +1787,6 @@ __poll_cq( ib_wc_t *p_free_wc, *p_done_wc; CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); - memset (free_wc, 0, sizeof(free_wc)); while( status != IB_NOT_FOUND ) { @@ -1799,7 +1800,7 @@ __poll_cq( /* Continue polling if nothing is done. */ if( status == IB_NOT_FOUND ) - return TRUE; + break; /* Abort if an error occurred. 
*/ if( status != IB_SUCCESS ) @@ -1848,15 +1849,15 @@ __poll_cq( } p_done_wc = p_done_wc->p_next; } + } - if( !g_root.is_polling ) + if( !g_root.is_polling ) + { + status = ib_rearm_cq(h_cq, FALSE); + if (status != IB_SUCCESS) { - status = ib_rearm_cq(h_cq, FALSE); - if (status != IB_SUCCESS) - { - printf("Failed to rearm CQ %p\n", h_cq ); - return FALSE; - } + printf("Failed to rearm CQ %p\n", h_cq ); + return FALSE; } } @@ -1873,7 +1874,7 @@ static boolean_t __poll_send_cqs() { ib_node_t *p_node; - uintn_t i; + int32_t i; for( i = 0; i < g_root.num_nodes; i++ ) { @@ -1898,7 +1899,7 @@ static boolean_t __poll_recv_cqs() { ib_node_t *p_node; - uintn_t i; + int32_t i; for( i = 0; i < g_root.num_nodes; i++ ) { diff --git a/trunk/ulp/dapl/dapl/common/dapl_adapter_util.h b/trunk/ulp/dapl/dapl/common/dapl_adapter_util.h index d37cb353..21cd8392 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_adapter_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_adapter_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the adapter data structure * - * $Id: dapl_adapter_util.h,v 1.42 2004/06/04 20:09:43 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_create.c b/trunk/ulp/dapl/dapl/common/dapl_cno_create.c index c12dc4ae..f7fee96b 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.2.1 * - * $Id: dapl_cno_create.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_free.c b/trunk/ulp/dapl/dapl/common/dapl_cno_free.c index 54d733e6..5efee23d 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_free.c @@ -32,7 +32,7 @@ * 
Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.2.2 * - * $Id: dapl_cno_free.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_modify_agent.c b/trunk/ulp/dapl/dapl/common/dapl_cno_modify_agent.c index 1fc824c0..b60ebb9d 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_modify_agent.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_modify_agent.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.2.4 * - * $Id: dapl_cno_modify_agent.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_query.c b/trunk/ulp/dapl/dapl/common/dapl_cno_query.c index 926469f5..76b3e4ab 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.2.5 * - * $Id: dapl_cno_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_util.c b/trunk/ulp/dapl/dapl/common/dapl_cno_util.c index 54ad4cab..cc83220b 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage CNO Info structure * - * $Id: dapl_cno_util.c,v 1.15 2004/06/15 15:26:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ia_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_util.h b/trunk/ulp/dapl/dapl/common/dapl_cno_util.h index 4158a3c9..4d647e6e 100644 --- 
a/trunk/ulp/dapl/dapl/common/dapl_cno_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the cno data structure * - * $Id: dapl_cno_util.h,v 1.7 2004/03/24 16:37:48 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_cno_wait.c b/trunk/ulp/dapl/dapl/common/dapl_cno_wait.c index 014f10e0..0504734a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cno_wait.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cno_wait.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.2.3 * - * $Id: dapl_cno_wait.c,v 1.2 2003/12/02 18:19:39 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cookie.c b/trunk/ulp/dapl/dapl/common/dapl_cookie.c index 41fd705b..28dbddfd 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cookie.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cookie.c @@ -74,7 +74,7 @@ * * - completions are delivered in order * - * $Id: dapl_cookie.c,v 1.13 2003/06/16 17:53:32 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_cookie.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cookie.h b/trunk/ulp/dapl/dapl/common/dapl_cookie.h index 8c24e308..8502b2c4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cookie.h +++ b/trunk/ulp/dapl/dapl/common/dapl_cookie.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the cookie data structure * - * $Id: dapl_cookie.h,v 1.7 2003/06/13 12:21:02 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_accept.c b/trunk/ulp/dapl/dapl/common/dapl_cr_accept.c index 71e0ba32..9a0d07e9 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_accept.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_accept.c @@ -32,7 
+32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_cr_accept.c,v 1.26 2004/06/04 20:09:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_callback.c b/trunk/ulp/dapl/dapl/common/dapl_cr_callback.c index dc77585e..b67043b6 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_callback.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_callback.c @@ -33,7 +33,7 @@ * Description: Accepts asynchronous callbacks from the Communications Manager * for EVDs that have been specified as the connection_evd. * - * $Id: dapl_cr_callback.c,v 1.74 2004/06/07 13:06:57 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" @@ -95,10 +95,10 @@ dapls_cr_callback ( DAT_RETURN dat_status; dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, - "--> dapl_cr_callback! context: %p event: %x cm_handle %p\n", + "--> dapl_cr_callback! 
context: %p event: %x cm_handle %d\n", context, ib_cm_event, - (void *) ib_cm_handle); + ib_cm_handle.cid ); /* * Passive side of the connection, context is a SP and @@ -270,7 +270,8 @@ dapls_cr_callback ( */ dapl_os_lock ( &ep_ptr->header.lock ); ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; - ep_ptr->cm_handle = IB_INVALID_HANDLE; + cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) ); + ep_ptr->cm_handle.cid = 0xFFFFFFFF; dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event); dapl_os_unlock ( &ep_ptr->header.lock ); diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_handoff.c b/trunk/ulp/dapl/dapl/common/dapl_cr_handoff.c index 32972261..9495d785 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_handoff.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_handoff.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_cr_handoff.c,v 1.4 2003/06/16 17:53:32 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_query.c b/trunk/ulp/dapl/dapl/common/dapl_cr_query.c index b1846012..4bb7de02 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_cr_query.c,v 1.10 2004/05/14 16:22:56 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_reject.c b/trunk/ulp/dapl/dapl/common/dapl_cr_reject.c index 2219c97e..fbe6356a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_reject.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_reject.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_cr_reject.c,v 1.14 2003/10/07 
11:22:08 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_util.c b/trunk/ulp/dapl/dapl/common/dapl_cr_util.c index 4414b6b5..e0c5b6f9 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage CR (Connection Request) structure * - * $Id: dapl_cr_util.c,v 1.7 2003/08/08 19:20:05 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_cr_util.h b/trunk/ulp/dapl/dapl/common/dapl_cr_util.h index 8d051c9c..392f3ef4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_cr_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_cr_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the CR data structure * - * $Id: dapl_cr_util.h,v 1.6 2003/06/13 12:21:03 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_connect.c b/trunk/ulp/dapl/dapl/common/dapl_ep_connect.c index aa41e4eb..a2c6c096 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_connect.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_connect.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_connect.c,v 1.28 2004/05/14 16:22:56 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_create.c b/trunk/ulp/dapl/dapl/common/dapl_ep_create.c index d0c80d4b..f68e4a0e 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the kDAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_create.c,v 1.32 2004/06/02 18:12:46 sjs2 Exp $ + * 
$Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_disconnect.c b/trunk/ulp/dapl/dapl/common/dapl_ep_disconnect.c index b5702807..a26f2294 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_disconnect.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_disconnect.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_disconnect.c,v 1.23 2004/05/10 18:04:05 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_dup_connect.c b/trunk/ulp/dapl/dapl/common/dapl_ep_dup_connect.c index 8968f5dc..d5eb6cb7 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_dup_connect.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_dup_connect.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_dup_connect.c,v 1.9 2004/04/23 19:06:51 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_free.c b/trunk/ulp/dapl/dapl/common/dapl_ep_free.c index 7132be62..b6c206f7 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5.4 * - * $Id: dapl_ep_free.c,v 1.29 2004/06/03 14:57:23 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_get_status.c b/trunk/ulp/dapl/dapl/common/dapl_ep_get_status.c index 34ef9d5d..217581a4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_get_status.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_get_status.c @@ -32,7 +32,7 @@ * Description: 
Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_get_status.c,v 1.9 2003/07/30 18:13:37 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_modify.c b/trunk/ulp/dapl/dapl/common/dapl_ep_modify.c index 4350f2fc..6c0a84ed 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_modify.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_modify.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.0 API, Chapter 6, section 5 * - * $Id: dapl_ep_modify.c,v 1.23 2003/07/11 18:42:17 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c b/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c index def89a28..8d5b8d20 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_post_rdma_read.c,v 1.9 2004/01/06 14:19:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ep_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c b/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c index 97abbfac..40e09ff7 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_post_rdma_write.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ep_util.h" diff --git 
a/trunk/ulp/dapl/dapl/common/dapl_ep_post_recv.c b/trunk/ulp/dapl/dapl/common/dapl_ep_post_recv.c index 94f0d14d..b0bb7948 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_post_recv.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_post_recv.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_post_recv.c,v 1.19 2004/01/19 21:24:49 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_post_send.c b/trunk/ulp/dapl/dapl/common/dapl_ep_post_send.c index 4bdd0512..31074f9f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_post_send.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_post_send.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_post_send.c,v 1.8 2004/01/06 14:19:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ep_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_query.c b/trunk/ulp/dapl/dapl/common/dapl_ep_query.c index dc3c7928..77a80103 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5 * - * $Id: dapl_ep_query.c,v 1.9 2004/05/14 16:22:56 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_reset.c b/trunk/ulp/dapl/dapl/common/dapl_ep_reset.c index c63bdbc0..31bb1103 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_reset.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_reset.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 5.13 * - * $Id: dapl_ep_reset.c,v 1.6 
2003/07/08 14:23:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_util.c b/trunk/ulp/dapl/dapl/common/dapl_ep_util.c index 1ee5c58d..77bfcc4f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage EP Info structure * - * $Id: dapl_ep_util.c,v 1.49 2004/05/10 18:04:05 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ep_util.h" @@ -134,7 +134,8 @@ dapl_ep_alloc ( ep_ptr->qp_handle = IB_INVALID_HANDLE; ep_ptr->qpn = 0; ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED; - ep_ptr->cm_handle = IB_INVALID_HANDLE; + cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) ); + ep_ptr->cm_handle.cid = 0xFFFFFFFF; ep_ptr->req_count = 0; ep_ptr->recv_count = 0; diff --git a/trunk/ulp/dapl/dapl/common/dapl_ep_util.h b/trunk/ulp/dapl/dapl/common/dapl_ep_util.h index 6ad1bc8c..a1675221 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ep_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_ep_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the EP data structure * - * $Id: dapl_ep_util.h,v 1.14 2004/01/05 13:39:05 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c b/trunk/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c index 8393ae21..eeda8a24 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.4.8 * - * $Id: dapl_evd_clear_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git 
a/trunk/ulp/dapl/dapl/common/dapl_evd_connection_callb.c b/trunk/ulp/dapl/dapl/common/dapl_evd_connection_callb.c index 98838615..1510ad09 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_connection_callb.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_connection_callb.c @@ -33,7 +33,7 @@ * Description: Accepts asynchronous callbacks from the Communications Manager * for EVDs that have been specified as the connection_evd. * - * $Id: dapl_evd_connection_callb.c,v 1.45 2004/06/07 13:06:56 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" @@ -79,10 +79,10 @@ dapl_evd_connection_callback ( dapl_dbg_log ( DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, - "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %p\n", + "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %d\n", context, ib_cm_event, - (void *) ib_cm_handle); + ib_cm_handle.cid); DAPL_CNTR(DCNT_EVD_CONN_CALLBACK); /* diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c b/trunk/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c index c5f62727..aba28eaf 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c @@ -30,7 +30,7 @@ * * PURPOSE: implements CQ async_callbacks from verbs * - * $Id: dapl_evd_cq_async_error_callb.c,v 1.8 2003/07/31 13:55:18 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_create.c b/trunk/ulp/dapl/dapl/common/dapl_evd_create.c index 1f1fc227..c0e16236 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_create.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_create.c,v 1.3 2004/02/09 20:34:33 sjs2 Exp $ + * $Id$ 
**********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_dequeue.c b/trunk/ulp/dapl/dapl/common/dapl_evd_dequeue.c index 0ceed437..bc67cab6 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_dequeue.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_dequeue.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely described in * the uDAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_dequeue.c,v 1.10 2004/01/06 14:19:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_disable.c b/trunk/ulp/dapl/dapl/common/dapl_evd_disable.c index ad27aa6a..a35659c6 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_disable.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_disable.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_disable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_dto_callb.c b/trunk/ulp/dapl/dapl/common/dapl_evd_dto_callb.c index 110f12af..76a38a1f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_dto_callb.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_dto_callb.c @@ -30,7 +30,7 @@ * * PURPOSE: implements DTO callbacks from verbs * - * $Id: dapl_evd_dto_callb.c,v 1.18 2004/01/06 14:19:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_enable.c b/trunk/ulp/dapl/dapl/common/dapl_evd_enable.c index 3af9ded0..6e2758e7 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_enable.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_enable.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API, Chapter 
6, section 3 * - * $Id: dapl_evd_enable.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_free.c b/trunk/ulp/dapl/dapl/common/dapl_evd_free.c index 9696819a..3835a083 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_free.c,v 1.13 2003/12/18 21:00:53 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_modify_cno.c b/trunk/ulp/dapl/dapl/common/dapl_evd_modify_cno.c index d350e7dc..88ede7dc 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_modify_cno.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_modify_cno.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_modify_cno.c,v 1.12 2003/12/17 11:31:53 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_post_se.c b/trunk/ulp/dapl/dapl/common/dapl_evd_post_se.c index ef656f8e..05614fb4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_post_se.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_post_se.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_post_se.c,v 1.7 2003/06/16 17:53:32 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c b/trunk/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c index 930479e8..70425530 100644 --- 
a/trunk/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c @@ -30,7 +30,7 @@ * * PURPOSE: implements QP callbacks from verbs * - * $Id: dapl_evd_qp_async_error_callb.c,v 1.17 2003/07/31 13:55:18 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_query.c b/trunk/ulp/dapl/dapl/common/dapl_evd_query.c index 66400995..5aa596bc 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_query.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_resize.c b/trunk/ulp/dapl/dapl/common/dapl_evd_resize.c index a702e9e7..f183a3e8 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_resize.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_resize.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API, Chapter 6, section 3 * - * $Id: dapl_evd_resize.c,v 1.7 2004/01/15 20:34:44 addetia Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c b/trunk/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c index 956d6205..f2fea18f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 3.4.7 * - * $Id: dapl_evd_set_unwaitable.c,v 1.1 2003/10/24 20:25:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include 
"dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c b/trunk/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c index ab3de68a..c55a280a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c @@ -30,7 +30,7 @@ * * PURPOSE: implements Unaffiliated callbacks from verbs * - * $Id: dapl_evd_un_async_error_callb.c,v 1.9 2003/07/31 13:55:18 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_util.c b/trunk/ulp/dapl/dapl/common/dapl_evd_util.c index bad4c46b..a5cf340f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage EVD Info structure * - * $Id: dapl_evd_util.c,v 1.56 2004/05/10 20:21:07 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_evd_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_util.h b/trunk/ulp/dapl/dapl/common/dapl_evd_util.h index 026249d5..7fe76c9f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the EVD data structure * - * $Id: dapl_evd_util.h,v 1.11 2003/11/11 20:38:22 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_evd_wait.c b/trunk/ulp/dapl/dapl/common/dapl_evd_wait.c index cfb5454d..71f3bb88 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_evd_wait.c +++ b/trunk/ulp/dapl/dapl/common/dapl_evd_wait.c @@ -33,7 +33,7 @@ * Description: Interfaces in this file are completely defined in * the uDAPL 1.1 API specification * - * $Id: dapl_evd_wait.c,v 1.1 2004/03/10 17:03:39 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff 
--git a/trunk/ulp/dapl/dapl/common/dapl_get_consumer_context.c b/trunk/ulp/dapl/dapl/common/dapl_get_consumer_context.c index 154873b2..90125b1f 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_get_consumer_context.c +++ b/trunk/ulp/dapl/dapl/common/dapl_get_consumer_context.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_get_consumer_context.c,v 1.6 2003/10/24 20:21:18 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_get_handle_type.c b/trunk/ulp/dapl/dapl/common/dapl_get_handle_type.c index faf6cfa4..009b72fa 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_get_handle_type.c +++ b/trunk/ulp/dapl/dapl/common/dapl_get_handle_type.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_get_handle_type.c,v 1.5 2003/08/20 13:50:45 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_hash.c b/trunk/ulp/dapl/dapl/common/dapl_hash.c index a1d1be82..7b5011ba 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_hash.c +++ b/trunk/ulp/dapl/dapl/common/dapl_hash.c @@ -33,7 +33,7 @@ * * Provides a generic hash table with chaining. 
* - * $Id: dapl_hash.c,v 1.13 2004/04/28 15:29:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_hash.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_hash.h b/trunk/ulp/dapl/dapl/common/dapl_hash.h index 687efe3f..0c5c15b4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_hash.h +++ b/trunk/ulp/dapl/dapl/common/dapl_hash.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the hash data structure * - * $Id: dapl_hash.h,v 1.6 2004/05/07 11:43:51 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_HASH_H_ diff --git a/trunk/ulp/dapl/dapl/common/dapl_hca_util.c b/trunk/ulp/dapl/dapl/common/dapl_hca_util.c index e45f1c57..87204727 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_hca_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_hca_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage HCA structure * - * $Id: dapl_hca_util.c,v 1.15 2004/04/15 15:36:25 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_hca_util.h b/trunk/ulp/dapl/dapl/common/dapl_hca_util.h index 7a03087c..f722e70d 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_hca_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_hca_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the HCA data structure * - * $Id: dapl_hca_util.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_HCA_UTIL_H_ diff --git a/trunk/ulp/dapl/dapl/common/dapl_ia_close.c b/trunk/ulp/dapl/dapl/common/dapl_ia_close.c index 8e0c568e..e34fb2fa 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ia_close.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ia_close.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_ia_close.c,v 1.9 2003/07/30 18:13:38 hobie16 Exp $ + * $Id$ 
**********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ia_open.c b/trunk/ulp/dapl/dapl/common/dapl_ia_open.c index 9645130c..283cfdbc 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ia_open.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ia_open.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_ia_open.c,v 1.35 2004/04/13 17:11:31 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ia_query.c b/trunk/ulp/dapl/dapl/common/dapl_ia_query.c index 8e24b723..1527d1ba 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ia_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ia_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_ia_query.c,v 1.25 2004/05/14 17:28:55 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ia_util.c b/trunk/ulp/dapl/dapl/common/dapl_ia_util.c index 23afebca..6a970db6 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ia_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ia_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage IA Info structure * - * $Id: dapl_ia_util.c,v 1.40 2004/04/13 17:11:31 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ia_util.h b/trunk/ulp/dapl/dapl/common/dapl_ia_util.h index 3dbba7b4..a674a019 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ia_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_ia_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the IA data structure * - * $Id: dapl_ia_util.h,v 1.9 2003/07/25 19:24:11 sjs2 Exp $ + * $Id$ 
**********************************************************************/ #ifndef _DAPL_IA_UTIL_H_ diff --git a/trunk/ulp/dapl/dapl/common/dapl_init.h b/trunk/ulp/dapl/dapl/common/dapl_init.h index 0c20d642..2002c911 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_init.h +++ b/trunk/ulp/dapl/dapl/common/dapl_init.h @@ -30,7 +30,7 @@ * * PURPOSE: Prototypes for library-interface init and fini functions * - * $Id: dapl_init.h,v 1.4 2003/07/31 14:04:17 jlentini Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_llist.c b/trunk/ulp/dapl/dapl/common/dapl_llist.c index 44d69f2a..95d2d8ef 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_llist.c +++ b/trunk/ulp/dapl/dapl/common/dapl_llist.c @@ -46,7 +46,7 @@ * Note: Each of the remove functions takes an assertion failure if * an element cannot be removed from the list. * - * $Id: dapl_llist.c,v 1.11 2004/05/04 14:02:51 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_lmr_create.c b/trunk/ulp/dapl/dapl/common/dapl_lmr_create.c index 1f2d21ca..07773d37 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_lmr_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_lmr_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_lmr_create.c,v 1.3 2003/11/10 14:43:11 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_lmr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_lmr_free.c b/trunk/ulp/dapl/dapl/common/dapl_lmr_free.c index 9f81de4f..923ad9b1 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_lmr_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_lmr_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_lmr_free.c,v 1.16 2003/11/10 
12:51:26 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_lmr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_lmr_query.c b/trunk/ulp/dapl/dapl/common/dapl_lmr_query.c index 5e04729e..35b89c6a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_lmr_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_lmr_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_lmr_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_lmr_util.c b/trunk/ulp/dapl/dapl/common/dapl_lmr_util.c index ced94bab..99de0c9a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_lmr_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_lmr_util.c @@ -31,7 +31,7 @@ * PURPOSE: Memory management support routines * Description: Support routines for LMR functions * - * $Id: dapl_lmr_util.c,v 1.8 2003/06/13 12:21:11 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_lmr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_lmr_util.h b/trunk/ulp/dapl/dapl/common/dapl_lmr_util.h index 1902bcaf..5e92f186 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_lmr_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_lmr_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the LMR data structure * - * $Id: dapl_lmr_util.h,v 1.6 2003/06/30 16:25:59 jlentini Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_mr_util.c b/trunk/ulp/dapl/dapl/common/dapl_mr_util.c index 1dd7fcc3..0b931a02 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_mr_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_mr_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Common Memory Management functions and data structures * - * $Id: dapl_mr_util.c,v 1.9 2003/11/10 
12:51:26 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_mr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_mr_util.h b/trunk/ulp/dapl/dapl/common/dapl_mr_util.h index 3d9a3427..4b07164d 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_mr_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_mr_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for memory registration functions * - * $Id: dapl_mr_util.h,v 1.5 2003/06/13 12:21:11 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_provider.c b/trunk/ulp/dapl/dapl/common/dapl_provider.c index 0ed5f521..01c0db38 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_provider.c +++ b/trunk/ulp/dapl/dapl/common/dapl_provider.c @@ -31,7 +31,7 @@ * PURPOSE: Provider function table * Description: DAT Interfaces to this provider * - * $Id: dapl_provider.c,v 1.11 2003/11/18 18:55:08 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_provider.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_provider.h b/trunk/ulp/dapl/dapl/common/dapl_provider.h index 727ad558..06a2d7ff 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_provider.h +++ b/trunk/ulp/dapl/dapl/common/dapl_provider.h @@ -31,7 +31,7 @@ * PURPOSE: Provider function table * Description: DAT Interfaces to this provider * - * $Id: dapl_provider.h,v 1.5 2004/03/17 13:59:42 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_PROVIDER_H_ diff --git a/trunk/ulp/dapl/dapl/common/dapl_psp_create.c b/trunk/ulp/dapl/dapl/common/dapl_psp_create.c index 9836c035..7fd3024c 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_psp_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_psp_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_psp_create.c,v 1.21 
2004/02/24 17:28:30 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_psp_create_any.c b/trunk/ulp/dapl/dapl/common/dapl_psp_create_any.c index c3267003..64f1728d 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_psp_create_any.c +++ b/trunk/ulp/dapl/dapl/common/dapl_psp_create_any.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_psp_create_any.c,v 1.7 2004/02/24 17:28:30 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_psp_free.c b/trunk/ulp/dapl/dapl/common/dapl_psp_free.c index b8cce22a..1f184279 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_psp_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_psp_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_psp_free.c,v 1.20 2003/10/07 11:22:08 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_psp_query.c b/trunk/ulp/dapl/dapl/common/dapl_psp_query.c index 83bde559..6bd49c89 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_psp_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_psp_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_psp_query.c,v 1.8 2003/06/23 12:28:05 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_pz_create.c b/trunk/ulp/dapl/dapl/common/dapl_pz_create.c index a0e52f03..b129b845 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_pz_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_pz_create.c @@ -32,7 +32,7 @@ * Description: 
Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_pz_create.c,v 1.7 2003/07/30 18:13:40 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl_pz_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_pz_free.c b/trunk/ulp/dapl/dapl/common/dapl_pz_free.c index b8d206d4..95c3950a 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_pz_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_pz_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_pz_free.c,v 1.9 2003/07/30 18:13:40 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_pz_query.c b/trunk/ulp/dapl/dapl/common/dapl_pz_query.c index 5fda90f9..7ebcd534 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_pz_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_pz_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_pz_query.c,v 1.6 2003/07/30 18:13:40 hobie16 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_pz_util.c b/trunk/ulp/dapl/dapl/common/dapl_pz_util.c index 8f4d0976..ac3d871e 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_pz_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_pz_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage PZ structure * - * $Id: dapl_pz_util.c,v 1.7 2003/06/13 12:21:11 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_pz_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_pz_util.h b/trunk/ulp/dapl/dapl/common/dapl_pz_util.h index 7a675162..6e90fd72 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_pz_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_pz_util.h @@ -30,7 +30,7 
@@ * * PURPOSE: Utility defs & routines for the PZ data structure * - * $Id: dapl_pz_util.h,v 1.4 2003/06/13 12:21:11 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.c b/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.c index 2b2af89d..11d574ff 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.c @@ -31,7 +31,7 @@ * PURPOSE: Ring buffer management * Description: Support and management functions for ring buffers * - * $Id: dapl_ring_buffer_util.c,v 1.11 2004/03/24 16:30:52 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_ring_buffer_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.h b/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.h index 5097e3d1..58a585b1 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_ring_buffer_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the ring buffer data structure * - * $Id: dapl_ring_buffer_util.h,v 1.6 2003/11/11 20:38:22 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_rmr_bind.c b/trunk/ulp/dapl/dapl/common/dapl_rmr_bind.c index 232e3d8d..4de96685 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rmr_bind.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rmr_bind.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_rmr_bind.c,v 1.18 2004/01/27 18:42:12 addetia Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rmr_create.c b/trunk/ulp/dapl/dapl/common/dapl_rmr_create.c index a4a9c0c3..3b47d761 100644 --- 
a/trunk/ulp/dapl/dapl/common/dapl_rmr_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rmr_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_rmr_create.c,v 1.7 2003/11/04 17:08:30 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_rmr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rmr_free.c b/trunk/ulp/dapl/dapl/common/dapl_rmr_free.c index e7a6271b..15878bd5 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rmr_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rmr_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_rmr_free.c,v 1.15 2003/11/04 17:08:30 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl_rmr_util.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rmr_query.c b/trunk/ulp/dapl/dapl/common/dapl_rmr_query.c index e37ad6c2..500b7b05 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rmr_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rmr_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 6 * - * $Id: dapl_rmr_query.c,v 1.5 2003/06/16 17:53:34 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rmr_util.h b/trunk/ulp/dapl/dapl/common/dapl_rmr_util.h index 5f0bc1d4..e9cf2eb9 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rmr_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_rmr_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the RMR data structure * - * $Id: dapl_rmr_util.h,v 1.5 2003/06/30 16:25:59 jlentini Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/common/dapl_rsp_create.c 
b/trunk/ulp/dapl/dapl/common/dapl_rsp_create.c index ccc9fb30..2434369c 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rsp_create.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rsp_create.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_rsp_create.c,v 1.16 2004/02/24 17:28:30 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rsp_free.c b/trunk/ulp/dapl/dapl/common/dapl_rsp_free.c index 55f8fa64..2c279e90 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rsp_free.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rsp_free.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_rsp_free.c,v 1.20 2004/01/29 21:14:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_rsp_query.c b/trunk/ulp/dapl/dapl/common/dapl_rsp_query.c index 552f21c0..dc811de7 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_rsp_query.c +++ b/trunk/ulp/dapl/dapl/common/dapl_rsp_query.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 4 * - * $Id: dapl_rsp_query.c,v 1.6 2003/06/16 17:53:34 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_set_consumer_context.c b/trunk/ulp/dapl/dapl/common/dapl_set_consumer_context.c index cc24e4ad..7f0914ef 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_set_consumer_context.c +++ b/trunk/ulp/dapl/dapl/common/dapl_set_consumer_context.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_set_consumer_context.c,v 1.6 2003/08/20 13:50:45 sjs2 Exp $ 
+ * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/common/dapl_sp_util.c b/trunk/ulp/dapl/dapl/common/dapl_sp_util.c index 57d31e18..a5554bf4 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_sp_util.c +++ b/trunk/ulp/dapl/dapl/common/dapl_sp_util.c @@ -30,7 +30,7 @@ * * PURPOSE: Manage PSP Info structure * - * $Id: dapl_sp_util.c,v 1.12 2003/12/18 18:00:43 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" @@ -205,7 +205,7 @@ dapl_sp_search_cr ( do { - if ( cr_ptr->ib_cm_handle == ib_cm_handle ) + if ( cr_ptr->ib_cm_handle.cid == ib_cm_handle.cid ) { cr_ptr_fnd = cr_ptr; diff --git a/trunk/ulp/dapl/dapl/common/dapl_sp_util.h b/trunk/ulp/dapl/dapl/common/dapl_sp_util.h index 89009272..1442ace0 100644 --- a/trunk/ulp/dapl/dapl/common/dapl_sp_util.h +++ b/trunk/ulp/dapl/dapl/common/dapl_sp_util.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines for the PSP & RSP data structure * - * $Id: dapl_sp_util.h,v 1.6 2003/12/18 18:00:43 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_cm.c b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_cm.c index 0b89baaa..cd180bdd 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_cm.c +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_cm.c @@ -15,7 +15,7 @@ * * PURPOSE: IB Connection routines for access to IBAL APIs * - * $Id: dapl_ibal_cm.c 1.38 04/08/06 19:29:06-04:00 aestrin@aestrin.infiniconsys.com $ + * $Id$ * **********************************************************************/ @@ -53,7 +53,7 @@ dapli_ib_reg_svc_cb ( { DAPL_HCA *hca_ptr; - hca_ptr = (DAPL_HCA *) p_reg_svc_rec->svc_context; + hca_ptr = (DAPL_HCA * __ptr64) p_reg_svc_rec->svc_context; dapl_os_assert (hca_ptr); @@ -419,6 +419,7 @@ DAT_RETURN dapls_ib_ns_create_gid_map ( IN DAPL_HCA *hca_ptr) { + UNUSED_PARAM( hca_ptr ); return (DAT_SUCCESS); } @@ -427,6 +428,7 
@@ DAT_RETURN dapls_ib_ns_remove_gid_map ( IN DAPL_HCA *hca_ptr) { + UNUSED_PARAM( hca_ptr ); return (DAT_SUCCESS); } @@ -461,7 +463,7 @@ dapli_ib_sa_query_cb ( p_path_rec = ib_get_query_path_rec (p_query_rec->p_result_mad, 0); if (p_path_rec) { - dapl_os_memcpy ((void *) p_query_rec->query_context, + dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, (void *) p_path_rec, sizeof (ib_path_rec_t)); dapl_dbg_log ( @@ -484,7 +486,7 @@ dapli_ib_sa_query_cb ( p_svc_rec = ib_get_query_svc_rec (p_query_rec->p_result_mad, 0); if (p_svc_rec) { - dapl_os_memcpy ((void *) p_query_rec->query_context, + dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, (void *) p_svc_rec, sizeof (ib_service_record_t)); dapl_dbg_log ( @@ -506,7 +508,7 @@ dapli_ib_sa_query_cb ( { ib_user_query_t *p_user_query; - p_user_query = (ib_user_query_t *) p_query_rec->query_context; + p_user_query = (ib_user_query_t * __ptr64) p_query_rec->query_context; if (p_user_query) { switch (p_user_query->attr_id) @@ -574,6 +576,7 @@ static void dapli_ibal_listen_err_cb ( IN ib_listen_err_rec_t *p_listen_err_rec ) { + UNUSED_PARAM( p_listen_err_rec ); dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> %s: CM callback listen error\n", "DiLEcb"); } @@ -582,6 +585,7 @@ static void dapli_ib_cm_apr_cb ( IN ib_cm_apr_rec_t *p_cm_apr_rec ) { + UNUSED_PARAM( p_cm_apr_rec ); dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> DiCAcb: CM callback APR (Alternate Path Request)\n"); } @@ -590,6 +594,7 @@ static void dapli_ib_cm_lap_cb ( IN ib_cm_lap_rec_t *p_cm_lap_rec ) { + UNUSED_PARAM( p_cm_lap_rec ); dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> DiCLcb: CM callback LAP (Load Alternate Path)\n"); } @@ -604,7 +609,7 @@ dapli_ib_cm_dreq_cb ( dapl_os_assert (p_cm_dreq_rec); - ep_ptr = (DAPL_EP *) p_cm_dreq_rec->qp_context; + ep_ptr = (DAPL_EP * __ptr64) p_cm_dreq_rec->qp_context; if ( ep_ptr == NULL || ep_ptr->header.magic == DAPL_MAGIC_INVALID ) @@ -652,7 +657,7 @@ dapli_ib_cm_dreq_cb ( dapls_cr_callback ( p_cm_dreq_rec->h_cm_dreq, 
IB_CME_DISCONNECTED, - (void *) p_cm_dreq_rec->p_dreq_pdata, + (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata, (void *) sp_ptr, NULL); } @@ -662,7 +667,7 @@ dapli_ib_cm_dreq_cb ( dapl_evd_connection_callback ( p_cm_dreq_rec->h_cm_dreq, IB_CME_DISCONNECTED, - (void *) p_cm_dreq_rec->p_dreq_pdata, + (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata, p_cm_dreq_rec->qp_context); } } @@ -676,7 +681,7 @@ dapli_ib_cm_drep_cb ( dapl_os_assert (p_cm_drep_rec != NULL); - ep_ptr = (DAPL_EP *) p_cm_drep_rec->qp_context; + ep_ptr = (DAPL_EP * __ptr64) p_cm_drep_rec->qp_context; if (ep_ptr) { @@ -699,7 +704,7 @@ dapli_ib_cm_drep_cb ( dapls_cr_callback ( ep_ptr->cm_handle, IB_CME_DISCONNECTED, - (void *) p_cm_drep_rec->p_drep_pdata, + (void * __ptr64) p_cm_drep_rec->p_drep_pdata, (void *) sp_ptr, NULL); } @@ -709,7 +714,7 @@ dapli_ib_cm_drep_cb ( dapl_evd_connection_callback ( ep_ptr->cm_handle, IB_CME_DISCONNECTED, - (void *) p_cm_drep_rec->p_drep_pdata, + (void * __ptr64) p_cm_drep_rec->p_drep_pdata, p_cm_drep_rec->qp_context); } } @@ -731,10 +736,10 @@ dapli_ib_cm_rep_cb ( dapl_os_memzero (&cm_rtu, sizeof ( ib_cm_rtu_t )); - dapl_os_assert ( ((DAPL_HEADER *) p_cm_rep_rec->qp_context)->magic == + dapl_os_assert ( ((DAPL_HEADER * __ptr64) p_cm_rep_rec->qp_context)->magic == DAPL_MAGIC_EP ); - ep_ptr = (DAPL_EP *) p_cm_rep_rec->qp_context; + ep_ptr = (DAPL_EP * __ptr64) p_cm_rep_rec->qp_context; dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> DiCRpcb: EP = %p local_max_rdma_read_in %d\n", ep_ptr, p_cm_rep_rec->resp_res); @@ -766,7 +771,7 @@ dapli_ib_cm_rep_cb ( cm_cb_op = IB_CME_LOCAL_FAILURE; } - prd_ptr = (DAPL_PRIVATE *) p_cm_rep_rec->p_rep_pdata; + prd_ptr = (DAPL_PRIVATE * __ptr64) p_cm_rep_rec->p_rep_pdata; #ifdef DAPL_DBG #if 0 @@ -791,7 +796,7 @@ dapli_ib_cm_rep_cb ( p_cm_rep_rec->h_cm_rep, cm_cb_op, (void *) prd_ptr, - (void *) p_cm_rep_rec->qp_context); + (void * __ptr64) p_cm_rep_rec->qp_context); } @@ -804,7 +809,7 @@ dapli_ib_cm_rej_cb ( dapl_os_assert (p_cm_rej_rec); - ep_ptr = 
(DAPL_EP *) p_cm_rej_rec->qp_context; + ep_ptr = (DAPL_EP * __ptr64) p_cm_rej_rec->qp_context; dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> DiCRjcb: EP = %p QP = %p rej reason = 0x%x\n", @@ -865,16 +870,16 @@ dapli_ib_cm_rej_cb ( { dapls_cr_callback ( ep_ptr->cm_handle, cm_event, - (void *) p_cm_rej_rec->p_rej_pdata, + (void * __ptr64) p_cm_rej_rec->p_rej_pdata, (void *) ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr, NULL); } else { - dapl_evd_connection_callback ( NULL, + dapl_evd_connection_callback ( ep_ptr->cm_handle, cm_event, - (void *) p_cm_rej_rec->p_rej_pdata, - (void *) p_cm_rej_rec->qp_context); + (void * __ptr64) p_cm_rej_rec->p_rej_pdata, + (void * __ptr64) p_cm_rej_rec->qp_context); } } @@ -889,7 +894,7 @@ dapli_ib_cm_req_cb ( dapl_os_assert (p_cm_req_rec); - sp_ptr = (DAPL_SP *) p_cm_req_rec->context; + sp_ptr = (DAPL_SP * __ptr64) p_cm_req_rec->context; dapl_os_assert (sp_ptr); @@ -970,8 +975,8 @@ dapli_ib_cm_req_cb ( dapls_cr_callback ( p_cm_req_rec->h_cm_req, IB_CME_CONNECTION_REQUEST_PENDING, - (void *) p_cm_req_rec->p_req_pdata, - (void *) p_cm_req_rec->context, + (void * __ptr64) p_cm_req_rec->p_req_pdata, + (void * __ptr64) p_cm_req_rec->context, (DAT_IA_ADDRESS_PTR)&dest_ia_addr); } @@ -980,6 +985,7 @@ static void dapli_ib_cm_mra_cb ( IN ib_cm_mra_rec_t *p_cm_mra_rec ) { + UNUSED_PARAM( p_cm_mra_rec ); dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, "--> DiCMcb: CM callback MRA\n"); } @@ -992,7 +998,7 @@ dapli_ib_cm_rtu_cb ( dapl_os_assert (p_cm_rtu_rec != NULL); - ep_ptr = (DAPL_EP *) p_cm_rtu_rec->qp_context; + ep_ptr = (DAPL_EP * __ptr64) p_cm_rtu_rec->qp_context; dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, "--> DiCRucb: EP = %p QP = %p\n", ep_ptr, ep_ptr->qp_handle); @@ -1005,7 +1011,7 @@ dapli_ib_cm_rtu_cb ( dapls_cr_callback ( ep_ptr->cm_handle, IB_CME_CONNECTED, - (void *) p_cm_rtu_rec->p_rtu_pdata, + (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata, (void *) sp_ptr, NULL); @@ -1015,7 +1021,7 @@ dapli_ib_cm_rtu_cb ( 
dapl_evd_connection_callback ( ep_ptr->cm_handle, IB_CME_CONNECTED, - (void *) p_cm_rtu_rec->p_rtu_pdata, + (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata, (void *) ep_ptr); } } @@ -1039,6 +1045,9 @@ dapls_ib_cm_remote_addr ( IN DAPL_PRIVATE *prd_ptr, OUT DAT_SOCK_ADDR6 *remote_ia_address ) { + UNUSED_PARAM( dat_handle ); + UNUSED_PARAM( prd_ptr ); + UNUSED_PARAM( remote_ia_address ); return DAT_SUCCESS; } @@ -1320,6 +1329,8 @@ DAT_RETURN ib_api_status_t ib_status; ib_cm_dreq_t cm_dreq; + UNUSED_PARAM( disconnect_flags ); + ia_ptr = ep_ptr->header.owner_ia; ib_status = IB_SUCCESS; @@ -1441,8 +1452,6 @@ dapls_ib_setup_conn_listener ( * Register request or mra callback functions */ cm_listen.pfn_cm_req_cb = dapli_ib_cm_req_cb; - cm_listen.pfn_cm_rej_cb = dapli_ib_cm_rej_cb; - cm_listen.pfn_cm_mra_cb = dapli_ib_cm_mra_cb; ib_status = ib_cm_listen ( dapl_ibal_root.h_al, &cm_listen, @@ -1483,6 +1492,8 @@ DAT_RETURN dapls_ib_remove_conn_listener ( ib_api_status_t ib_status; DAT_RETURN dat_status = DAT_SUCCESS; + UNUSED_PARAM( ia_ptr ); + if (sp_ptr->cm_srvc_handle) { ib_status = ib_cm_cancel (sp_ptr->cm_srvc_handle, @@ -1564,7 +1575,7 @@ dapls_ib_reject_connection ( if (ib_status != IB_SUCCESS) { dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsRjC: cm_handle = %p reject failed = %s\n", - ib_cm_handle, ib_get_err_str(ib_status)); + &ib_cm_handle, ib_get_err_str(ib_status)); } return ( dapl_ib_status_convert ( ib_status ) ); @@ -1676,6 +1687,8 @@ dapls_ib_accept_connection ( #endif #endif + cm_rep.pfn_cm_rej_cb = dapli_ib_cm_rej_cb; + cm_rep.pfn_cm_mra_cb = dapli_ib_cm_mra_cb; cm_rep.pfn_cm_rtu_cb = dapli_ib_cm_rtu_cb; cm_rep.pfn_cm_lap_cb = dapli_ib_cm_lap_cb; cm_rep.pfn_cm_dreq_cb = dapli_ib_cm_dreq_cb; @@ -1816,7 +1829,7 @@ dapls_ib_cr_handoff ( cr_ptr = (DAPL_CR *) cr_handle; - if (cr_ptr->ib_cm_handle == IB_INVALID_HANDLE) + if (cr_ptr->ib_cm_handle.cid == 0xFFFFFFFF) { dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p invalid cm handle\n", cr_ptr); return 
DAT_INVALID_PARAMETER; @@ -1908,6 +1921,8 @@ dapls_ib_private_data_size ( { int size; + UNUSED_PARAM( prd_ptr ); + switch (conn_op) { case DAPL_PDATA_CONN_REQ: diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_dto.h b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_dto.h index e70a2dad..d71feace 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_dto.h +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_dto.h @@ -16,7 +16,7 @@ * PURPOSE: Utility routines for data transfer operations using the * IBAL APIs * - * $Id: dapl_ibal_dto.h 1.15 04/07/14 18:37:35-04:00 aestrin@aestrin.infiniconsys.com $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h index 5ba07d08..89ce48a8 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h @@ -15,7 +15,7 @@ * * PURPOSE: Utility defs & routines for access to Intel IBAL APIs * - * $Id: dapl_ibal_kmod.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c index 7c5aa30b..ea7f37c5 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c @@ -15,7 +15,7 @@ * * PURPOSE: Utility routines for access to IBAL APIs * - * $Id: dapl_ibal_mrdb.c,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h index 0ed9b869..eb89ee0e 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h @@ -15,7 +15,7 @@ * * PURPOSE: Utility defs & routines for access to Intel IBAL APIs * - * $Id: dapl_ibal_mrdb.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $ + * $Id$ * 
**********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_qp.c b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_qp.c index 70591758..2d3ea964 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_qp.c +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_qp.c @@ -15,7 +15,7 @@ * * PURPOSE: IB QP routines for access to IBAL APIs * - * $Id: dapl_ibal_qp.c 1.25 04/08/06 18:43:01-04:00 aestrin@aestrin.infiniconsys.com $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.c b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.c index 2235ce49..34afba3d 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.c +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.c @@ -15,7 +15,7 @@ * * PURPOSE: Utility routines for access to IBAL APIs * - * $Id: dapl_ibal_util.c 1.28 04/07/14 18:37:36-04:00 aestrin@aestrin.infiniconsys.com $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.h b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.h index a8ac61df..4a108a87 100644 --- a/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.h +++ b/trunk/ulp/dapl/dapl/ibal/dapl_ibal_util.h @@ -15,7 +15,7 @@ * * PURPOSE: Utility defs & routines for access to Intel IBAL APIs * - * $Id: dapl_ibal_util.h,v 1.1.1.1 2002/10/16 15:10:17 vu Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/include/dapl.h b/trunk/ulp/dapl/dapl/include/dapl.h index 8cb8fd2b..2af45256 100644 --- a/trunk/ulp/dapl/dapl/include/dapl.h +++ b/trunk/ulp/dapl/dapl/include/dapl.h @@ -34,7 +34,7 @@ * DAPL RI. 
* * - * $Id: dapl.h,v 1.77 2004/06/04 13:20:05 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_H_ diff --git a/trunk/ulp/dapl/dapl/include/dapl_debug.h b/trunk/ulp/dapl/dapl/include/dapl_debug.h index 7b847b6a..abb54b4e 100644 --- a/trunk/ulp/dapl/dapl/include/dapl_debug.h +++ b/trunk/ulp/dapl/dapl/include/dapl_debug.h @@ -34,7 +34,7 @@ * Description: * * - * $Id: dapl_debug.h,v 1.5 2003/12/18 17:55:39 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_DEBUG_H_ diff --git a/trunk/ulp/dapl/dapl/include/dapl_ipoib_names.h b/trunk/ulp/dapl/dapl/include/dapl_ipoib_names.h index d3214a7f..f0d117d8 100644 --- a/trunk/ulp/dapl/dapl/include/dapl_ipoib_names.h +++ b/trunk/ulp/dapl/dapl/include/dapl_ipoib_names.h @@ -38,7 +38,7 @@ * * NOTE: As implementations mature this may not be necessary. * - * $Id: dapl_ipoib_names.h,v 1.4 2003/06/13 12:21:13 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _IPOIB_NAMING_H_ diff --git a/trunk/ulp/dapl/dapl/include/dapl_vendor.h b/trunk/ulp/dapl/dapl/include/dapl_vendor.h index c2070cd4..1741b871 100644 --- a/trunk/ulp/dapl/dapl/include/dapl_vendor.h +++ b/trunk/ulp/dapl/dapl/include/dapl_vendor.h @@ -33,7 +33,7 @@ * these values are returned in the DAT_IA_ATTR parameter of * dat_ia_query() * - * $Id: dapl_vendor.h,v 1.3 2003/06/30 13:12:54 sjs2 Exp $ + * $Id$ **********************************************************************/ /********************************************************************** diff --git a/trunk/ulp/dapl/dapl/udapl/dapl_init.c b/trunk/ulp/dapl/dapl/udapl/dapl_init.c index 44c5496e..856be598 100644 --- a/trunk/ulp/dapl/dapl/udapl/dapl_init.c +++ b/trunk/ulp/dapl/dapl/udapl/dapl_init.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in * the DAPL 1.1 API, Chapter 6, section 2 * - * $Id: dapl_init.c,v 1.50 2004/01/06 
14:21:59 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/udapl/dapl_name_service.c b/trunk/ulp/dapl/dapl/udapl/dapl_name_service.c index a8558d11..1adcad58 100644 --- a/trunk/ulp/dapl/dapl/udapl/dapl_name_service.c +++ b/trunk/ulp/dapl/dapl/udapl/dapl_name_service.c @@ -36,7 +36,7 @@ * Description: Interfaces in this file are completely described in * dapl_name_service.h * - * $Id: dapl_name_service.c,v 1.2 2004/02/25 13:21:43 sjs2 Exp $ + * $Id$ **********************************************************************/ /* diff --git a/trunk/ulp/dapl/dapl/udapl/dapl_name_service.h b/trunk/ulp/dapl/dapl/udapl/dapl_name_service.h index 573323db..8982f59d 100644 --- a/trunk/ulp/dapl/dapl/udapl/dapl_name_service.h +++ b/trunk/ulp/dapl/dapl/udapl/dapl_name_service.h @@ -30,7 +30,7 @@ * * PURPOSE: Utility defs & routines supporting name services * - * $Id: dapl_name_service.h,v 1.1 2003/10/28 14:43:22 sjs2 Exp $ + * $Id$ * **********************************************************************/ diff --git a/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.c b/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.c index 45354f5a..7907a69d 100644 --- a/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.c +++ b/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.c @@ -45,7 +45,7 @@ * This file also contains the timer handler thread, * embodied in dapls_timer_thread(). 
* - * $Id: dapl_timer_util.c,v 1.2 2004/01/05 20:50:21 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dapl.h" diff --git a/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.h b/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.h index 0d99f860..21cea8cb 100644 --- a/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.h +++ b/trunk/ulp/dapl/dapl/udapl/dapl_timer_util.h @@ -31,7 +31,7 @@ * PURPOSE: DAPL timer management * Description: support for dapl_timer.h * - * $Id: dapl_timer_util.h,v 1.2 2004/01/05 13:39:05 sjs2 Exp $ + * $Id$ **********************************************************************/ void dapls_timer_init ( void ); diff --git a/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.c b/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.c index 539b4c7a..fec18fe7 100644 --- a/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.c +++ b/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.c @@ -35,7 +35,7 @@ * of common functions. * * - * $Id: dapl_osd.c,v 1.26 2003/07/31 14:04:18 jlentini Exp $ + * $Id$ **********************************************************************/ #include "dapl_osd.h" diff --git a/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.h b/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.h index 85897210..3f786b6f 100644 --- a/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.h +++ b/trunk/ulp/dapl/dapl/udapl/linux/dapl_osd.h @@ -34,7 +34,7 @@ * a canonical DAPL interface. Designed to be portable * and hide OS specific quirks of common functions. * - * $Id: dapl_osd.h,v 1.38 2003/08/20 14:08:57 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_OSD_H_ diff --git a/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.c b/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.c index 0bd255be..6d07686b 100644 --- a/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.c +++ b/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.c @@ -35,7 +35,7 @@ * of common functions. 
* * - * $Id: dapl_osd.c,v 1.16 2003/07/16 17:54:27 sjs2 Exp $ + * $Id$ **********************************************************************/ /* diff --git a/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.h b/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.h index 07e556a8..0f39272b 100644 --- a/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.h +++ b/trunk/ulp/dapl/dapl/udapl/windows/dapl_osd.h @@ -34,7 +34,7 @@ * a canonical DAPL interface. Designed to be portable * and hide OS specific quirks of common functions. * - * $Id: dapl_osd.h,v 1.20 2003/07/31 14:04:18 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAPL_OSD_H_ diff --git a/trunk/ulp/dapl/dat/common/dat_strerror.c b/trunk/ulp/dapl/dat/common/dat_strerror.c index 3eb32917..1a0d21e0 100644 --- a/trunk/ulp/dapl/dat/common/dat_strerror.c +++ b/trunk/ulp/dapl/dat/common/dat_strerror.c @@ -30,7 +30,7 @@ * * PURPOSE: Convert DAT_RETURN values to humman readable string * - * $Id: dat_strerror.c,v 1.3 2003/09/24 14:49:46 sjs2 Exp $ + * $Id$ **********************************************************************/ #include diff --git a/trunk/ulp/dapl/dat/kdat/dat_kdapl.c b/trunk/ulp/dapl/dat/kdat/dat_kdapl.c index 767e3574..c702e931 100644 --- a/trunk/ulp/dapl/dat/kdat/dat_kdapl.c +++ b/trunk/ulp/dapl/dat/kdat/dat_kdapl.c @@ -32,7 +32,7 @@ * Description: Interfaces in this file are completely described in *the kDAPL 1.0 API * - * $Id: dat_kdapl.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dat_osd.h" diff --git a/trunk/ulp/dapl/dat/kdat/dat_module.c b/trunk/ulp/dapl/dat/kdat/dat_module.c index 294d1ed5..f1264227 100644 --- a/trunk/ulp/dapl/dat/kdat/dat_module.c +++ b/trunk/ulp/dapl/dat/kdat/dat_module.c @@ -32,7 +32,7 @@ * kernel * Description: a linux module implementation * - * $Id: dat_module.c,v 1.4 2003/06/13 11:10:36 sjs2 Exp $ + * $Id$ 
**********************************************************************/ #include "dat_osd.h" diff --git a/trunk/ulp/dapl/dat/kdat/linux/dat_osd.c b/trunk/ulp/dapl/dat/kdat/linux/dat_osd.c index 4494328a..b463e68b 100644 --- a/trunk/ulp/dapl/dat/kdat/linux/dat_osd.c +++ b/trunk/ulp/dapl/dat/kdat/linux/dat_osd.c @@ -35,7 +35,7 @@ * of common functions. * * - * $Id: dat_osd.c,v 1.5 2003/06/16 17:53:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #include diff --git a/trunk/ulp/dapl/dat/kdat/linux/dat_osd.h b/trunk/ulp/dapl/dat/kdat/linux/dat_osd.h index 0e308e7c..1a4cf15c 100644 --- a/trunk/ulp/dapl/dat/kdat/linux/dat_osd.h +++ b/trunk/ulp/dapl/dat/kdat/linux/dat_osd.h @@ -34,7 +34,7 @@ * a canonical DAT interface. Designed to be portable * and hide OS specific quirks of common functions. * - * $Id: dat_osd.h,v 1.5 2003/06/16 17:53:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #ifndef _DAT_OSD_H_ diff --git a/trunk/ulp/dapl/dat/udat/linux/dat_osd.c b/trunk/ulp/dapl/dat/udat/linux/dat_osd.c index 411e7fc7..3101eea6 100644 --- a/trunk/ulp/dapl/dat/udat/linux/dat_osd.c +++ b/trunk/ulp/dapl/dat/udat/linux/dat_osd.c @@ -34,7 +34,7 @@ * interface. Designed to be portable and hide OS specific quirks * of common functions. * - * $Id: dat_osd.c,v 1.8 2003/08/15 20:09:52 jlentini Exp $ + * $Id$ **********************************************************************/ #include "dat_osd.h" diff --git a/trunk/ulp/dapl/dat/udat/linux/dat_osd.h b/trunk/ulp/dapl/dat/udat/linux/dat_osd.h index acf4f760..d7f5e312 100644 --- a/trunk/ulp/dapl/dat/udat/linux/dat_osd.h +++ b/trunk/ulp/dapl/dat/udat/linux/dat_osd.h @@ -34,7 +34,7 @@ * a canonical DAT interface. Designed to be portable * and hide OS specific quirks of common functions. 
* - * $Id: dat_osd.h,v 1.14 2003/07/31 14:04:19 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAT_OSD_H_ diff --git a/trunk/ulp/dapl/dat/udat/udat.c b/trunk/ulp/dapl/dat/udat/udat.c index 5c0197a1..3e2566c3 100644 --- a/trunk/ulp/dapl/dat/udat/udat.c +++ b/trunk/ulp/dapl/dat/udat/udat.c @@ -30,7 +30,7 @@ * * PURPOSE: DAT Provider and Consumer registry functions. * - * $Id: udat.c,v 1.13 2003/08/20 14:28:40 hobie16 Exp $ + * $Id$ **********************************************************************/ #include diff --git a/trunk/ulp/dapl/dat/udat/udat_sr_parser.c b/trunk/ulp/dapl/dat/udat/udat_sr_parser.c index 3d8aa8e2..092546d3 100644 --- a/trunk/ulp/dapl/dat/udat/udat_sr_parser.c +++ b/trunk/ulp/dapl/dat/udat/udat_sr_parser.c @@ -30,7 +30,7 @@ * * PURPOSE: static registry parser * - * $Id: udat_sr_parser.c,v 1.1 2003/07/31 14:04:19 jlentini Exp $ + * $Id$ **********************************************************************/ diff --git a/trunk/ulp/dapl/dat/udat/udat_sr_parser.h b/trunk/ulp/dapl/dat/udat/udat_sr_parser.h index 9f27bfcf..de5b3e61 100644 --- a/trunk/ulp/dapl/dat/udat/udat_sr_parser.h +++ b/trunk/ulp/dapl/dat/udat/udat_sr_parser.h @@ -30,7 +30,7 @@ * * PURPOSE: static registry (SR) parser inteface declarations * - * $Id: udat_sr_parser.h,v 1.1 2003/07/31 14:04:19 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAT_SR_PARSER_H_ diff --git a/trunk/ulp/dapl/dat/udat/windows/dat_osd.c b/trunk/ulp/dapl/dat/udat/windows/dat_osd.c index cb702741..e86a784c 100644 --- a/trunk/ulp/dapl/dat/udat/windows/dat_osd.c +++ b/trunk/ulp/dapl/dat/udat/windows/dat_osd.c @@ -34,7 +34,7 @@ * interface. Designed to be portable and hide OS specific quirks * of common functions. 
* - * $Id: dat_osd.c,v 1.6 2003/06/16 17:53:35 sjs2 Exp $ + * $Id$ **********************************************************************/ #include "dat_osd.h" diff --git a/trunk/ulp/dapl/dat/udat/windows/dat_osd.h b/trunk/ulp/dapl/dat/udat/windows/dat_osd.h index c3a92b17..e25e2f43 100644 --- a/trunk/ulp/dapl/dat/udat/windows/dat_osd.h +++ b/trunk/ulp/dapl/dat/udat/windows/dat_osd.h @@ -34,7 +34,7 @@ * a canonical DAPL interface. Designed to be portable * and hide OS specific quirks of common functions. * - * $Id: dat_osd.h,v 1.12 2003/08/15 20:09:53 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAT_OSD_H_ diff --git a/trunk/ulp/dapl/dat/udat/windows/dat_osd_sr.h b/trunk/ulp/dapl/dat/udat/windows/dat_osd_sr.h index 12acdec1..0c9de746 100644 --- a/trunk/ulp/dapl/dat/udat/windows/dat_osd_sr.h +++ b/trunk/ulp/dapl/dat/udat/windows/dat_osd_sr.h @@ -14,7 +14,7 @@ * * PURPOSE: static registry (SR) platform specific inteface declarations * - * $Id: dat_osd_sr.h,v 1.1 2003/03/28 22:52:47 jlentini Exp $ + * $Id$ **********************************************************************/ #ifndef _DAT_OSD_SR_H_ diff --git a/trunk/ulp/ipoib/kernel/ipoib_adapter.c b/trunk/ulp/ipoib/kernel/ipoib_adapter.c index ccbd53ff..276c8e02 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_adapter.c +++ b/trunk/ulp/ipoib/kernel/ipoib_adapter.c @@ -205,6 +205,21 @@ ipoib_create_adapter( } +ib_api_status_t +ipoib_start_adapter( + IN ipoib_adapter_t* const p_adapter ) +{ + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_SYNC ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + void ipoib_destroy_adapter( IN ipoib_adapter_t* const p_adapter ) @@ -340,8 +355,6 @@ adapter_init( return status; } - status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_SYNC ); - IPOIB_EXIT( IPOIB_DBG_INIT ); return status; } @@ -562,6 +575,9 @@ __ipoib_pnp_cb( case IB_PNP_PORT_ADD: /* If we 
were initializing, we might have pended some OIDs. */ ipoib_resume_oids( p_adapter ); + NdisMIndicateStatus( p_adapter->h_adapter, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); break; default: @@ -819,13 +835,17 @@ ipoib_set_active( * If we had a pending OID request for OID_GEN_LINK_SPEED, * complete it now. */ - if( old_state == IB_PNP_PORT_ADD || - old_state == IB_PNP_PORT_REMOVE ) + switch( old_state ) { + case IB_PNP_PORT_ADD: + ipoib_reg_addrs( p_adapter ); + /* Fall through. */ + + case IB_PNP_PORT_REMOVE: ipoib_resume_oids( p_adapter ); - } - else - { + break; + + default: /* Join all programmed multicast groups. */ for( i = 0; i < p_adapter->mcast_array_size; i++ ) { diff --git a/trunk/ulp/ipoib/kernel/ipoib_adapter.h b/trunk/ulp/ipoib/kernel/ipoib_adapter.h index 1d71c55c..c82572cc 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_adapter.h +++ b/trunk/ulp/ipoib/kernel/ipoib_adapter.h @@ -303,6 +303,11 @@ ipoib_create_adapter( OUT ipoib_adapter_t** const pp_adapter ); +ib_api_status_t +ipoib_start_adapter( + IN ipoib_adapter_t* const p_adapter ); + + void ipoib_destroy_adapter( IN ipoib_adapter_t* const p_adapter ); diff --git a/trunk/ulp/ipoib/kernel/ipoib_driver.c b/trunk/ulp/ipoib/kernel/ipoib_driver.c index 72cb514d..bedeaf8b 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_driver.c +++ b/trunk/ulp/ipoib/kernel/ipoib_driver.c @@ -522,7 +522,6 @@ ipoib_initialize( *p_selected_medium_index = medium_index; - /* Create the adapter adapter */ ib_status = ipoib_create_adapter( wrapper_config_context, h_adapter, &p_adapter ); if( ib_status != IB_SUCCESS ) @@ -550,6 +549,16 @@ ipoib_initialize( } #endif + /* Create the adapter adapter */ + ib_status = ipoib_start_adapter( p_adapter ); + if( ib_status != IB_SUCCESS ) + { + ipoib_destroy_adapter( p_adapter ); + IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR, + ("ipoib_start_adapter returned status %d.\n", ib_status ) ); + return NDIS_STATUS_FAILURE; + } + IPOIB_EXIT( IPOIB_DBG_INIT ); 
return status; } diff --git a/trunk/ulp/ipoib/kernel/ipoib_endpoint.c b/trunk/ulp/ipoib/kernel/ipoib_endpoint.c index 90384d6e..5fe1c547 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_endpoint.c +++ b/trunk/ulp/ipoib/kernel/ipoib_endpoint.c @@ -358,7 +358,7 @@ __path_query_cb( } cl_obj_unlock( &p_endpt->obj ); - if( p_query_rec->status != IB_SUCCESS ) + if( p_query_rec->status != IB_SUCCESS || !p_query_rec->result_cnt ) { p_port->p_adapter->hung = TRUE; ipoib_endpt_deref( p_endpt ); diff --git a/trunk/ulp/ipoib/kernel/ipoib_port.c b/trunk/ulp/ipoib/kernel/ipoib_port.c index dce61f27..3c21429a 100644 --- a/trunk/ulp/ipoib/kernel/ipoib_port.c +++ b/trunk/ulp/ipoib/kernel/ipoib_port.c @@ -1754,6 +1754,20 @@ __recv_mgr_filter( p_eth = &p_desc->p_buf->eth.pkt; #endif /*IPOIB_INLINE_RECV */ + /* Don't report loopback traffic - we requested SW loopback. */ + if( !cl_memcmp( &p_port->p_adapter->mac, &p_eth->hdr.src, + sizeof(p_port->p_adapter->mac) ) ) + { + /* + * "This is not the packet you're looking for" - don't update + * receive statistics, the packet never happened. + */ + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + /* Dereference the port object on behalf of the failed receive. */ + cl_obj_deref( &p_port->obj ); + continue; + } + switch( p_ipoib->hdr.type ) { case ETH_PROT_TYPE_IP: @@ -1921,6 +1935,8 @@ __recv_dhcp( IPOIB_ENTER( IPOIB_DBG_RECV ); + UNUSED_PARAM( p_port ); + /* Create the ethernet header. */ status = __recv_gen( p_ipoib, p_eth, p_src, p_dst ); if( status != IB_SUCCESS ) @@ -1938,17 +1954,6 @@ __recv_dhcp( IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR, ("Invalid DHCP op code.\n") ); return IB_INVALID_SETTING; } - /* find a better way to check for echo packets ? 
*/ - if ( p_port->p_adapter->mac.addr[0] == p_eth->hdr.src.addr[0] && - p_port->p_adapter->mac.addr[1] == p_eth->hdr.src.addr[1] && - p_port->p_adapter->mac.addr[2] == p_eth->hdr.src.addr[2] && - p_port->p_adapter->mac.addr[3] == p_eth->hdr.src.addr[3] && - p_port->p_adapter->mac.addr[4] == p_eth->hdr.src.addr[4] && - p_port->p_adapter->mac.addr[5] == p_eth->hdr.src.addr[5] ) - { - IPOIB_TRACE_EXIT( IPOIB_DBG_ERROR, ("Local echo dhcp msg.\n") ); - return IB_INVALID_PARAMETER; - } /* * Find the client identifier option, making sure to skip @@ -3405,6 +3410,7 @@ ipoib_port_send( IPOIB_ENTER( IPOIB_DBG_SEND ); + cl_spinlock_acquire( &p_port->send_lock ); for( i = 0; i < num_packets; i++ ) { desc.p_pkt = p_packet_array[i]; @@ -3424,7 +3430,6 @@ ipoib_port_send( continue; } - cl_spinlock_acquire( &p_port->send_lock ); cl_perf_start( SendMgrQueue ); status = __send_mgr_queue( p_port, p_eth_hdr, &desc.p_endpt ); cl_perf_stop( &p_port->p_adapter->perf, SendMgrQueue ); @@ -3438,10 +3443,8 @@ ipoib_port_send( IPOIB_LIST_ITEM_FROM_PACKET( p_packet_array[i++] ) ); } cl_perf_stop( &p_port->p_adapter->perf, QueuePacket ); - cl_spinlock_release( &p_port->send_lock ); - return; + break; } - cl_spinlock_release( &p_port->send_lock ); if( status != NDIS_STATUS_SUCCESS ) { ASSERT( status == NDIS_STATUS_NO_ROUTE_TO_DESTINATION ); @@ -3455,7 +3458,6 @@ ipoib_port_send( continue; } - /* No lock needed to build the work request. 
*/ cl_perf_start( BuildSendDesc ); status = __build_send_desc( p_port, p_eth_hdr, p_buf, buf_len, &desc ); cl_perf_stop( &p_port->p_adapter->perf, BuildSendDesc ); @@ -3486,6 +3488,7 @@ ipoib_port_send( cl_atomic_inc( &p_port->send_mgr.depth ); } + cl_spinlock_release( &p_port->send_lock ); IPOIB_EXIT( IPOIB_DBG_SEND ); } diff --git a/trunk/ulp/srp/kernel/srp_hba.h b/trunk/ulp/srp/kernel/srp_hba.h index 1a99fa1b..96d9e3dd 100644 --- a/trunk/ulp/srp/kernel/srp_hba.h +++ b/trunk/ulp/srp/kernel/srp_hba.h @@ -44,12 +44,15 @@ typedef struct _srp_session *p_srp_session_t; +#pragma warning(disable:4324) typedef struct _srp_path_record { cl_list_item_t list_item; ib_path_rec_t path_rec; } srp_path_record_t; +#pragma warning(default:4324) + typedef struct _srp_hba { diff --git a/trunk/ulp/wsd/user/ib_cm.c b/trunk/ulp/wsd/user/ib_cm.c index 9dcc9d3e..aa4fe7aa 100644 --- a/trunk/ulp/wsd/user/ib_cm.c +++ b/trunk/ulp/wsd/user/ib_cm.c @@ -51,36 +51,30 @@ get_service_id_for_port( /* Signals a select event to the switch. */ -static void -post_select_event( +void +ibsp_post_select_event( struct ibsp_socket_info *socket_info, int event, int error ) { - CL_ENTER( IBSP_DBG_NEV, gdbg_lvl ); + HANDLE h_event; - if( (socket_info->event_mask & event) == 0 ) - { - /* This event is not requested. Since we capture only two important - * event, this case should never occur. */ - CL_EXIT_ERROR( IBSP_DBG_NEV, gdbg_lvl, - ("Hummm, tried to post an umasked event. 
(%x, %x)\n", - socket_info->event_mask, event) ); - return; - } + IBSP_ENTER( IBSP_DBG_NEV ); - cl_spinlock_acquire( &socket_info->event_mutex ); - - socket_info->network_events |= event; + CL_ASSERT( socket_info ); + CL_ASSERT( event ); switch( event ) { case FD_CONNECT: + IBSP_TRACE1( IBSP_DBG_NEV, + ("socket %p FD_CONNECT\n", socket_info) ); socket_info->errno_connect = error; break; case FD_ACCEPT: - socket_info->errno_accept = error; + IBSP_TRACE1( IBSP_DBG_NEV, + ("socket %p FD_ACCEPT\n", socket_info) ); break; default: @@ -88,11 +82,20 @@ post_select_event( break; } - cl_spinlock_release( &socket_info->event_mutex ); + _InterlockedOr( &socket_info->network_events, event ); - SetEvent( socket_info->event_select ); + h_event = InterlockedCompareExchangePointer( + &socket_info->event_select, NULL, NULL ); + /* Check for event notification request and signal as needed. */ + if( (socket_info->event_mask & event) && h_event ) + { + IBSP_TRACE2( IBSP_DBG_NEV, + ("Signaling eventHandle %p at time %I64d.\n", + h_event, cl_get_time_stamp() ) ); + SetEvent( h_event ); + } - CL_EXIT( IBSP_DBG_NEV, gdbg_lvl ); + IBSP_EXIT( IBSP_DBG_NEV ); } @@ -165,7 +168,7 @@ cm_req_callback( mra.svc_timeout = 0x1F; ib_cm_mra( p_cm_req_rec->h_cm_req, &mra ); - post_select_event( socket_info, FD_ACCEPT, 0 ); + ibsp_post_select_event( socket_info, FD_ACCEPT, 0 ); break; case IBSP_DUPLICATING_REMOTE: @@ -271,12 +274,12 @@ cm_rep_callback( /* Note: a REJ has been automatically sent. 
*/ CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("ib_cm_rtu failed (0x%d)\n", status) ); IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); - post_select_event( socket_info, FD_CONNECT, WSAENOBUFS ); + ibsp_post_select_event( socket_info, FD_CONNECT, WSAENOBUFS ); } else { IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECTED ); - post_select_event( socket_info, FD_CONNECT, 0 ); + ibsp_post_select_event( socket_info, FD_CONNECT, 0 ); } } else if( socket_info->socket_state == IBSP_DUPLICATING_NEW ) @@ -450,7 +453,7 @@ cm_rej_callback( { case IBSP_CONNECT: IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); - post_select_event( socket_info, FD_CONNECT, WSAECONNREFUSED ); + ibsp_post_select_event( socket_info, FD_CONNECT, WSAECONNREFUSED ); break; case IBSP_ACCEPT: @@ -698,8 +701,6 @@ ib_listen( socket_info->info.listen.listen_req_param.identifier)); param.pfn_cm_req_cb = cm_req_callback; - param.pfn_cm_mra_cb = cm_mra_callback; - param.pfn_cm_rej_cb = cm_rej_callback; param.qp_type = IB_QPT_RELIABLE_CONN; @@ -852,7 +853,6 @@ ib_connect( cm_req.rnr_retry_cnt = QP_ATTRIB_RNR_RETRY; cm_req.retry_cnt = QP_ATTRIB_RETRY_COUNT; cm_req.p_alt_path = NULL; - cm_req.pfn_cm_req_cb = cm_req_callback; cm_req.pfn_cm_mra_cb = cm_mra_callback; cm_req.pfn_cm_rej_cb = cm_rej_callback; @@ -921,6 +921,8 @@ ib_accept( cm_rep.flow_ctrl = cm_req_received->flow_ctrl; cm_rep.rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT; cm_rep.rnr_retry_cnt = cm_req_received->rnr_retry_cnt; + cm_rep.pfn_cm_mra_cb = cm_mra_callback; + cm_rep.pfn_cm_rej_cb = cm_rej_callback; cm_rep.pfn_cm_rtu_cb = cm_rtu_callback; cm_rep.pfn_cm_lap_cb = cm_lap_callback; cm_rep.pfn_cm_dreq_cb = cm_dreq_callback; diff --git a/trunk/ulp/wsd/user/ibsp_iblow.c b/trunk/ulp/wsd/user/ibsp_iblow.c index 5a13f25a..49073274 100644 --- a/trunk/ulp/wsd/user/ibsp_iblow.c +++ b/trunk/ulp/wsd/user/ibsp_iblow.c @@ -186,9 +186,9 @@ complete_wq( // cl_spinlock_release( &socket_info->recv_lock ); //} - if( p_recv_wr->ds_array[0].length >= 40 ) + if( 
wc->status == IB_SUCCESS && p_recv_wr->ds_array[0].length >= 40 ) { - debug_dump_buffer( IBSP_DBG_WQ, gdbg_lvl, "RECV", + debug_dump_buffer( IBSP_DBG_WQ | IBSP_DBG_LEVEL4, "RECV", (void * __ptr64)p_recv_wr->ds_array[0].vaddr, 40 ); } @@ -1134,7 +1134,7 @@ ib_create_socket( status = ib_query_qp( socket_info->qp, &qp_attr ); if( status == IB_SUCCESS ) { - socket_info->max_inline = qp_attr.sq_max_inline; + socket_info->max_inline = min( g_max_inline, qp_attr.sq_max_inline ); } else { diff --git a/trunk/ulp/wsd/user/ibspdebug.c b/trunk/ulp/wsd/user/ibspdebug.c index 3dc2d70f..a2e98c00 100644 --- a/trunk/ulp/wsd/user/ibspdebug.c +++ b/trunk/ulp/wsd/user/ibspdebug.c @@ -116,7 +116,6 @@ DebugPrintSockAddr( void debug_dump_buffer( uint32_t level, - uint32_t mask, const char *name, void *buf, size_t len ) @@ -129,19 +128,19 @@ debug_dump_buffer( s = str; *s = 0; - CL_PRINT( level, mask, ("HEX for %s:\n", name) ); + CL_PRINT( level, gdbg_lvl, ("HEX for %s:\n", name) ); for( i = 0; i < len; i++ ) { s += sprintf( s, "%02x ", p[i] ); if( i % 16 == 15 ) { - CL_PRINT( level, mask, ("HEX:%s: %s\n", name, str) ); + CL_PRINT( level, gdbg_lvl, ("HEX:%s: %s\n", name, str) ); s = str; *s = 0; } } - CL_PRINT( level, mask, ("HEX:%s:%s\n", name, str) ); + CL_PRINT( level, gdbg_lvl, ("HEX:%s: %s\n", name, str) ); } diff --git a/trunk/ulp/wsd/user/ibspdebug.h b/trunk/ulp/wsd/user/ibspdebug.h index cf711968..7d14e570 100644 --- a/trunk/ulp/wsd/user/ibspdebug.h +++ b/trunk/ulp/wsd/user/ibspdebug.h @@ -70,31 +70,7 @@ extern uint32_t gdbg_lvl; #define IBSP_ENTER( l ) CL_ENTER( (l | IBSP_DBG_FUNC), gdbg_lvl ) #define IBSP_EXIT( l ) CL_EXIT( (l | IBSP_DBG_FUNC), gdbg_lvl ) -#if defined UNIT_TEST - -#define BREAKPOINT(x) - -#define CL_ERROR(a, b, c) printf c -#define CL_EXIT_ERROR(a, b, c) printf c - -#undef CL_TRACE -#define CL_TRACE(a,b,c) printf c -#undef CL_ENTER -#define CL_ENTER(a,b) printf("Enter %s\n", __FUNCTION__) -#undef CL_EXIT -#define CL_EXIT(a,b) printf("Exit %s\n", __FUNCTION__) 
-#undef CL_PRINT -#define CL_PRINT(a,b,c) printf c - -#define fzprint(a) - -#define STAT_INC(name) -#define STAT_DEC(name) -#define free_socket_info(a) -#define ib_deregister_all_mr(a) -#define DebugPrintSockAddr(a,b,c) - -#elif defined _DEBUG_ +#if defined _DEBUG_ //#define fzprint(a) CL_PRINT(IBSP_DBG_USER, IBSP_DBG_USER, a) #define fzprint(a) @@ -122,7 +98,6 @@ DebugPrintSockAddr( void debug_dump_buffer( uint32_t level, - uint32_t mask, const char *name, void *buf, size_t len ); diff --git a/trunk/ulp/wsd/user/ibspdll.c b/trunk/ulp/wsd/user/ibspdll.c index dc2e9bdc..8b7bbffa 100644 --- a/trunk/ulp/wsd/user/ibspdll.c +++ b/trunk/ulp/wsd/user/ibspdll.c @@ -47,6 +47,7 @@ static const GUID provider_guid = { }; static DWORD no_read = 0; +uint32_t g_max_inline = 0xFFFFFFFF; /* * Function: DllMain @@ -60,10 +61,8 @@ _DllMain( IN DWORD dwReason, IN LPVOID lpvReserved ) { -#ifdef _DEBUG_ - TCHAR dbg_lvl_str[16]; + TCHAR env_var[16]; DWORD i; -#endif CL_ENTER( IBSP_DBG_DLL, gdbg_lvl ); @@ -94,10 +93,10 @@ _DllMain( CL_TRACE( IBSP_DBG_DLL, gdbg_lvl, ("DllMain: DLL_PROCESS_ATTACH\n") ); #ifdef _DEBUG_ - i = GetEnvironmentVariable( "IBWSD_DBG", dbg_lvl_str, 16 ); + i = GetEnvironmentVariable( "IBWSD_DBG", env_var, 16 ); if( i && i <= 16 ) { - gdbg_lvl = _tcstoul( dbg_lvl_str, NULL, 16 ); + gdbg_lvl = _tcstoul( env_var, NULL, 16 ); IBSP_TRACE( IBSP_DBG_DLL, ("Given IBWSD_DBG debug level:0x%X\n", gdbg_lvl) ); @@ -107,6 +106,10 @@ _DllMain( /* See if the user wants to disable RDMA reads. 
*/ no_read = GetEnvironmentVariable( "IBWSD_NO_READ", NULL, 0 ); + i = GetEnvironmentVariable( "IBWSD_INLINE", env_var, 16 ); + if( i && i <= 16 ) + g_max_inline = _tcstoul( env_var, NULL, 10 ); + if( init_globals() ) return FALSE; break; @@ -242,9 +245,8 @@ IBSPAccept( WSABUF callee_id; struct listen_incoming *incoming; struct ibsp_port *port; - BOOLEAN reject; - CL_ENTER( IBSP_DBG_CONN, gdbg_lvl ); + IBSP_ENTER( IBSP_DBG_CONN ); fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); @@ -253,8 +255,8 @@ IBSPAccept( if( *addrlen < sizeof(struct sockaddr_in) ) { - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("invalid addrlen (%d, %d)\n", *addrlen, sizeof(struct sockaddr_in)) ); + IBSP_ERROR_EXIT( ("invalid addrlen (%d, %d)\n", + *addrlen, sizeof(struct sockaddr_in)) ); *lpErrno = WSAEFAULT; return INVALID_SOCKET; } @@ -269,8 +271,7 @@ IBSPAccept( if( socket_info->socket_state != IBSP_LISTEN ) { cl_spinlock_release( &socket_info->mutex ); - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("Socket is not in right socket_state (%s)\n", + IBSP_ERROR_EXIT( ("Socket is not in right socket_state (%s)\n", IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); *lpErrno = WSAEINVAL; return INVALID_SOCKET; @@ -280,8 +281,7 @@ IBSPAccept( { cl_spinlock_release( &socket_info->mutex ); - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("No pending connection found for this socket\n") ); + IBSP_ERROR_EXIT( ("No pending connection found for this socket\n") ); *lpErrno = WSAEWOULDBLOCK; return INVALID_SOCKET; } @@ -293,17 +293,15 @@ IBSPAccept( struct listen_incoming, item); port = socket_info->port; - reject = FALSE; - /* Find the destination IP address */ if( port == NULL ) { /* The socket was bound to INADDR_ANY. We must find the correct port - * for the new socket. */ + * for the new socket. 
*/ port = get_port_from_ip_address( incoming->params.dest.sin_addr ); if( port == NULL ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("incoming destination IP address not local (%s)\n", inet_ntoa( incoming->params.dest.sin_addr )) ); goto reject; @@ -313,15 +311,11 @@ IBSPAccept( /* Cross-check with the path info to make sure we are conectiong correctly */ if( port->guid != ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid ) ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("GUIDs of port for destination IP address and primary path do not match (%016I64x, %016I64x)\n", port->guid, ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid )) ); - goto reject; - } - if( reject ) - { reject: /* The request is invalid. Remove it from the list and reject it. */ cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item ); @@ -347,26 +341,18 @@ reject: callee_id.buf = (char *)&incoming->params.dest; callee_id.len = sizeof(incoming->params.dest); -#ifdef _DEBUG_ - { - char buf[100]; - char *p = buf; - p += sprintf( p, "got incoming connection from %s/%d-%d to", - inet_ntoa( incoming->params.source.sin_addr ), - cl_ntoh16( incoming->params.source.sin_port ), - incoming->params.source.sin_family ); - p += sprintf( p, " %s/%d-%d", - inet_ntoa( incoming->params.dest.sin_addr ), - cl_ntoh16( incoming->params.dest.sin_port ), - incoming->params.dest.sin_family ); - - CL_TRACE( IBSP_DBG_CONN, gdbg_lvl, (buf) ); - } -#endif + IBSP_TRACE( IBSP_DBG_CONN, + ("Got incoming conn from %s/%d-%d to %s/%d-%d\n", + inet_ntoa( incoming->params.source.sin_addr ), + cl_ntoh16( incoming->params.source.sin_port ), + incoming->params.source.sin_family, + inet_ntoa( incoming->params.dest.sin_addr ), + cl_ntoh16( incoming->params.dest.sin_port ), + incoming->params.dest.sin_family) ); /* Call the conditional function */ - ret = lpfnCondition( &caller_id, NULL, - NULL, NULL, &callee_id, NULL, NULL, dwCallbackData ); + ret = lpfnCondition( &caller_id, NULL, 
NULL, NULL, + &callee_id, NULL, NULL, dwCallbackData ); switch( ret ) { @@ -374,23 +360,25 @@ reject: cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item ); cl_spinlock_release( &socket_info->mutex ); - ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP ); + IBSP_TRACE1( IBSP_DBG_CONN, + ("Conditional routine returned CF_REJECT\n") ); + + ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_USER_DEFINED ); HeapFree( g_ibsp.heap, 0, incoming ); - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("Conditional routine rejected connection\n") ); *lpErrno = WSAECONNREFUSED; + IBSP_EXIT( IBSP_DBG_CONN ); return INVALID_SOCKET; - break; case CF_DEFER: cl_spinlock_release( &socket_info->mutex ); - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("Conditional routine returned defer\n") ); + IBSP_TRACE1( IBSP_DBG_CONN, + ("Conditional routine returned CF_DEFER\n") ); /* TODO: Send MRA */ *lpErrno = WSATRY_AGAIN; + IBSP_EXIT( IBSP_DBG_CONN ); return INVALID_SOCKET; - break; case CF_ACCEPT: break; @@ -398,10 +386,11 @@ reject: default: /* Should never happen */ cl_spinlock_release( &socket_info->mutex ); - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("lpfnCondition returned undocumented code (%d)\n", ret) ); + IBSP_ERROR( + ("Conditional routine returned undocumented code (%d)\n", ret) ); CL_ASSERT( 0 ); *lpErrno = WSAECONNREFUSED; + IBSP_EXIT( IBSP_DBG_CONN ); return INVALID_SOCKET; } @@ -452,12 +441,12 @@ reject: new_socket_info->local_addr = incoming->params.dest; cl_qlist_remove_item( &socket_info->info.listen.list, &incoming->item ); + /* Signal the event again if there are more connection requests. 
*/ + if( cl_qlist_count( &socket_info->info.listen.list ) ) + ibsp_post_select_event( socket_info, FD_ACCEPT, 0 ); cl_spinlock_release( &socket_info->mutex ); - /* Update the state of the socket context */ - IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_ACCEPT ); - /* Copy the socket context info from parent socket context */ new_socket_info->socket_options = socket_info->socket_options; @@ -473,20 +462,25 @@ reject: new_socket_info->info.accept.event = CreateEvent( NULL, FALSE, FALSE, NULL ); + cl_spinlock_acquire( &new_socket_info->mutex ); + /* Update the state of the socket context */ + IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_ACCEPT ); + ret = ib_accept( new_socket_info, &incoming->cm_req_received, lpErrno ); if( ret ) { + IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_CREATE ); + cl_spinlock_release( &new_socket_info->mutex ); /* Free the socket descriptor */ fzprint(("%s():%d:0x%x:0x%x: socket=0x%p calling lpWPUCloseSocketHandle=0x%p\n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, socket_info->switch_socket)); if( g_ibsp.up_call_table.lpWPUCloseSocketHandle( - new_socket_info->switch_socket, lpErrno ) == SOCKET_ERROR ) + new_socket_info->switch_socket, &ret ) == SOCKET_ERROR ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, - ("WPUCloseSocketHandle failed: %d\n", *lpErrno) ); + IBSP_ERROR( ("WPUCloseSocketHandle failed: %d\n", ret) ); } else { @@ -503,7 +497,7 @@ reject: ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP ); HeapFree( g_ibsp.heap, 0, incoming ); - *lpErrno = WSAEACCES; + *lpErrno = ret; CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("ib_accept failed (%d)\n", ret) ); @@ -511,6 +505,7 @@ reject: } else { + cl_spinlock_release( &new_socket_info->mutex ); HeapFree( g_ibsp.heap, 0, incoming ); if( WaitForSingleObject( new_socket_info->info.accept.event, INFINITE ) == WAIT_OBJECT_0 ) @@ -529,7 +524,7 @@ reject: } else { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("ib_accept failed - socket state 
is %s\n", IBSP_SOCKET_STATE_STR( new_socket_info->socket_state )) ); @@ -543,7 +538,7 @@ reject: if( g_ibsp.up_call_table.lpWPUCloseSocketHandle( new_socket_info->switch_socket, lpErrno ) == SOCKET_ERROR ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("WPUCloseSocketHandle failed: %d\n", *lpErrno) ); } else @@ -563,7 +558,6 @@ reject: ("returns new SocketID (0x%x)\n", new_socket) ); return (SOCKET) new_socket_info; - } else { @@ -611,7 +605,7 @@ IBSPBind( /* Sanity checks */ if( namelen != sizeof(struct sockaddr_in) ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("invalid namelen (%d instead of %d)\n", namelen, sizeof(struct sockaddr_in)) ); *lpErrno = WSAEFAULT; @@ -620,7 +614,7 @@ IBSPBind( if( addr->sin_family != AF_INET ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("bad family for socket\n") ); + IBSP_ERROR( ("bad family for socket\n") ); *lpErrno = WSAEFAULT; goto error; } @@ -631,7 +625,7 @@ IBSPBind( port = get_port_from_ip_address( addr->sin_addr ); if( port == NULL ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("This IP address does not belong to that host (%08x)\n", addr->sin_addr.S_un.S_addr) ); *lpErrno = WSAEADDRNOTAVAIL; @@ -645,13 +639,13 @@ IBSPBind( /* We are going to take this mutex for some time, * but at this stage, it shouldn't impact anything. 
*/ - cl_spinlock_acquire( &socket_info->event_mutex ); + cl_spinlock_acquire( &socket_info->mutex ); /* Verify the state of the socket */ if( socket_info->socket_state != IBSP_CREATE ) { - cl_spinlock_release( &socket_info->event_mutex ); - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + cl_spinlock_release( &socket_info->mutex ); + IBSP_ERROR( ("Invalid socket state (%s)\n", IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); *lpErrno = WSAEINVAL; @@ -666,8 +660,8 @@ IBSPBind( if( ret ) { socket_info->port = NULL; - cl_spinlock_release( &socket_info->event_mutex ); - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("ib_create socket failed with %d\n", ret) ); + cl_spinlock_release( &socket_info->mutex ); + IBSP_ERROR( ("ib_create socket failed with %d\n", ret) ); *lpErrno = WSAENOBUFS; goto error; } @@ -678,7 +672,7 @@ IBSPBind( IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); - cl_spinlock_release( &socket_info->event_mutex ); + cl_spinlock_release( &socket_info->mutex ); CL_EXIT( IBSP_DBG_CONN, gdbg_lvl ); return 0; @@ -721,19 +715,18 @@ IBSPCloseSocket( cl_atomic_inc( &g_ibsp.CloseSocket_count ); #endif - cl_spinlock_acquire( &socket_info->event_mutex ); - + cl_spinlock_acquire( &socket_info->mutex ); old_state = socket_info->socket_state; IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CLOSING ); - cl_spinlock_release( &socket_info->event_mutex ); + cl_spinlock_release( &socket_info->mutex ); shutdown_and_destroy_socket_info( socket_info, old_state ); - cl_spinlock_acquire( &socket_info->event_mutex ); + cl_spinlock_acquire( &socket_info->mutex ); IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CLOSED ); - cl_spinlock_release( &socket_info->event_mutex ); + cl_spinlock_release( &socket_info->mutex ); /* Take off socket_info_list and put on closed_socket_info_list */ cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); @@ -747,7 +740,6 @@ IBSPCloseSocket( /* Notify ib_cleanup_thread() to free this */ SetEvent( g_ibsp.ib_cleanup_event ); - CL_EXIT( IBSP_DBG_CONN, gdbg_lvl ); *lpErrno = 0; 
@@ -988,44 +980,33 @@ IBSPEnumNetworkEvents( LPWSANETWORKEVENTS lpNetworkEvents, LPINT lpErrno ) { - struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; - CL_ENTER( IBSP_DBG_NEV, gdbg_lvl ); + IBSP_ENTER( IBSP_DBG_NEV ); - fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, - __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); - - cl_spinlock_acquire( &socket_info->event_mutex ); + ResetEvent( hEventObject ); - lpNetworkEvents->lNetworkEvents = 0; + lpNetworkEvents->lNetworkEvents = + InterlockedExchange( &socket_info->network_events, 0 ); - if( socket_info->network_events & FD_ACCEPT ) + if( lpNetworkEvents->lNetworkEvents & FD_ACCEPT ) { - CL_TRACE( IBSP_DBG_NEV, gdbg_lvl, ("FD_ACCEPT\n") ); - lpNetworkEvents->lNetworkEvents |= FD_ACCEPT; - lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT] = socket_info->errno_accept; + IBSP_TRACE1( IBSP_DBG_NEV, + ("socket %p notify FD_ACCEPT at time %I64d\n", + socket_info, cl_get_time_stamp()) ); + lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT] = 0; } - if( socket_info->network_events & FD_CONNECT ) + if( lpNetworkEvents->lNetworkEvents & FD_CONNECT ) { - CL_TRACE( IBSP_DBG_NEV, gdbg_lvl, ("FD_CONNECT\n") ); - lpNetworkEvents->lNetworkEvents |= FD_CONNECT; + IBSP_TRACE1( IBSP_DBG_NEV, + ("socket %p notify FD_CONNECT %d at time %I64d\n", + socket_info, socket_info->errno_connect, cl_get_time_stamp()) ); lpNetworkEvents->iErrorCode[FD_CONNECT_BIT] = socket_info->errno_connect; } - socket_info->network_events = 0; - - ResetEvent( hEventObject ); - - cl_spinlock_release( &socket_info->event_mutex ); - - CL_TRACE_EXIT( IBSP_DBG_NEV, gdbg_lvl, - ("returning %x, accept=%d, connect=%d\n", - lpNetworkEvents->lNetworkEvents, - lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT], - lpNetworkEvents->iErrorCode[FD_CONNECT_BIT]) ); - *lpErrno = 0; + IBSP_EXIT( IBSP_DBG_NEV ); return 0; } @@ -1043,30 +1024,38 @@ IBSPEventSelect( long lNetworkEvents, LPINT 
lpErrno ) { - struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + long events; - CL_ENTER( IBSP_DBG_NEV, gdbg_lvl ); + IBSP_ENTER( IBSP_DBG_NEV ); - fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, - __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + IBSP_TRACE4( IBSP_DBG_NEV, + ("Socket %p requesting notifiction of %d on event %p.\n", + s, lNetworkEvents, hEventObject) ); if( (lNetworkEvents & ~(FD_ACCEPT | FD_CONNECT)) != 0 ) { - CL_TRACE_EXIT(IBSP_DBG_NEV, gdbg_lvl, + IBSP_TRACE_EXIT(IBSP_DBG_NEV, ("Unknown lNetworkEvents flag given (%x)\n", lNetworkEvents) ); *lpErrno = WSAEINVAL; return SOCKET_ERROR; } - CL_ASSERT( (hEventObject == NULL && socket_info->event_select != NULL) || - (hEventObject != NULL && socket_info->event_select == NULL) ); CL_ASSERT( lpErrno ); - if( hEventObject ) - socket_info->event_select = hEventObject; socket_info->event_mask = lNetworkEvents; + InterlockedExchangePointer( &socket_info->event_select, hEventObject ); - CL_EXIT( IBSP_DBG_NEV, gdbg_lvl ); + events = InterlockedCompareExchange( &socket_info->network_events, 0, 0 ); + /* Check for existing events and signal as appropriate. 
*/ + if( (socket_info->event_mask & events) && hEventObject ) + { + IBSP_TRACE2( IBSP_DBG_NEV, + ("Signaling eventHandle %p .\n", socket_info->event_select) ); + SetEvent( hEventObject ); + } + + IBSP_EXIT( IBSP_DBG_NEV ); return 0; } @@ -1374,20 +1363,17 @@ IBSPIoctl( GUID SANRDMARead = WSAID_RDMAREAD; GUID SANMemoryRegistrationCacheCallback = WSAID_MEMORYREGISTRATIONCACHECALLBACK; - CL_ENTER( IBSP_DBG_OPT, gdbg_lvl ); + IBSP_ENTER( IBSP_DBG_OPT ); UNUSED_PARAM( cbInBuffer ); UNUSED_PARAM( lpOverlapped ); UNUSED_PARAM( lpCompletionRoutine ); UNUSED_PARAM( lpThreadId ); - fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p\n", __FUNCTION__, - __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped)); - if( dwIoControlCode == SIO_GET_EXTENSION_FUNCTION_POINTER ) { /* This a special case. The socket handle passed is not valid. */ - CL_TRACE( IBSP_DBG_OPT, gdbg_lvl, ("Get extension function pointer\n") ); + IBSP_TRACE1( IBSP_DBG_OPT, ("Get extension function pointer\n") ); if( memcmp( lpvInBuffer, &SANRegisterMemory, sizeof(GUID) ) == 0 ) { @@ -1418,6 +1404,8 @@ IBSPIoctl( { if( no_read ) { + IBSP_TRACE( IBSP_DBG_WARN | IBSP_DBG_OPT, + ("RDMA_READ disabled.\n") ); *lpErrno = WSAEOPNOTSUPP; return SOCKET_ERROR; } @@ -1436,10 +1424,11 @@ IBSPIoctl( } else { - CL_EXIT_ERROR( IBSP_DBG_OPT, gdbg_lvl, ("invalid extension GUID\n") ); + IBSP_ERROR_EXIT( ("invalid extension GUID\n") ); *lpErrno = WSAEINVAL; return SOCKET_ERROR; } + IBSP_EXIT( IBSP_DBG_OPT ); return 0; } @@ -1611,7 +1600,8 @@ IBSPRecv( { /* Seen in real life with overlap/client test. * The switch closes a socket then calls this. Why? 
*/ - IBSP_ERROR_EXIT( ("invalid socket handle %x\n", s) ); + IBSP_TRACE_EXIT( IBSP_DBG_WARN | IBSP_DBG_IO, + ("invalid socket handle %x\n", s) ); *lpErrno = WSAENOTSOCK; return SOCKET_ERROR; } @@ -1826,13 +1816,20 @@ IBSPSend( CL_ASSERT( lpCompletionRoutine == NULL ); CL_ASSERT( lpOverlapped != NULL ); + cl_spinlock_acquire( &socket_info->mutex ); /* Check the state of the socket */ - if( socket_info->socket_state != IBSP_CONNECTED ) + switch( socket_info->socket_state ) { + case IBSP_CONNECTED: + case IBSP_DISCONNECTED: + break; + + default: IBSP_ERROR_EXIT( ("Socket is not in connected socket_state \n") ); *lpErrno = WSAENOTCONN; return SOCKET_ERROR; } + cl_spinlock_release( &socket_info->mutex ); if( socket_info->qp_error ) { @@ -1957,7 +1954,8 @@ IBSPSend( #ifdef _DEBUG_ if( lpBuffers[0].len >= 40 ) { - debug_dump_buffer( IBSP_DBG_WQ, gdbg_lvl, "SEND", lpBuffers[0].buf, 40 ); + debug_dump_buffer( IBSP_DBG_WQ | IBSP_DBG_LEVEL4, "SEND", + lpBuffers[0].buf, 40 ); } #endif @@ -2113,13 +2111,13 @@ IBSPSocket( { struct ibsp_socket_info *socket_info = NULL; - CL_ENTER( IBSP_DBG_CONN, gdbg_lvl ); + IBSP_ENTER( IBSP_DBG_SI ); UNUSED_PARAM( g ); if( af != AF_INET ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("bad family %d instead of %d\n", af, AF_INET) ); *lpErrno = WSAEAFNOSUPPORT; goto error; @@ -2127,7 +2125,7 @@ IBSPSocket( if( type != SOCK_STREAM ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("bad type %d instead of %d\n", type, SOCK_STREAM) ); *lpErrno = WSAEPROTOTYPE; goto error; @@ -2135,7 +2133,7 @@ IBSPSocket( if( protocol != IPPROTO_TCP ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("bad protocol %d instead of %d\n", protocol, IPPROTO_TCP) ); *lpErrno = WSAEPROTONOSUPPORT; goto error; @@ -2143,7 +2141,7 @@ IBSPSocket( if( (dwFlags != WSA_FLAG_OVERLAPPED) ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("dwFlags is not WSA_FLAG_OVERLAPPED (%x)\n", dwFlags) ); *lpErrno = WSAEINVAL; goto error; @@ -2152,7 +2150,7 @@ IBSPSocket( 
socket_info = create_socket_info(); if( socket_info == NULL ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("create_socket_info return NULL\n") ); + IBSP_ERROR( ("create_socket_info return NULL\n") ); *lpErrno = WSAENOBUFS; goto error; } @@ -2165,7 +2163,7 @@ IBSPSocket( ret = setup_duplicate_socket( socket_info, lpProtocolInfo->dwProviderReserved ); if( ret ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("setup_duplicate_socket failed with %d\n",ret) ); *lpErrno = ret; goto error; @@ -2180,9 +2178,8 @@ IBSPSocket( if( socket_info->switch_socket != INVALID_SOCKET ) { - fzprint(("%s():%d:0x%x:0x%x: socket_info=0x%p switch_socket=0x%p \n", - __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), - socket_info, socket_info->switch_socket)); + IBSP_TRACE1( IBSP_DBG_SI, ("socket_info=0x%p switch_socket=0x%p \n", + socket_info, socket_info->switch_socket) ); STAT_INC( wpusocket_num ); } @@ -2190,7 +2187,7 @@ IBSPSocket( if( socket_info->switch_socket == INVALID_SOCKET ) { - CL_ERROR( IBSP_DBG_CONN, gdbg_lvl, + IBSP_ERROR( ("WPUCreateSocketHandle() failed: %d\n", *lpErrno) ); /* lpErrno has just been set */ goto error; @@ -2220,7 +2217,7 @@ IBSPSocket( fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info)); - CL_TRACE_EXIT( IBSP_DBG_CONN, gdbg_lvl, + IBSP_TRACE_EXIT( IBSP_DBG_SI, ("returning socket handle %p\n", socket_info) ); return (SOCKET) socket_info; @@ -2231,7 +2228,7 @@ error: CL_ASSERT( *lpErrno != 0 ); - CL_EXIT_ERROR( IBSP_DBG_CONN, gdbg_lvl, ("Returning error %d\n", *lpErrno) ); + IBSP_ERROR_EXIT( ("Returning error %d\n", *lpErrno) ); return INVALID_SOCKET; } diff --git a/trunk/ulp/wsd/user/ibspdll.h b/trunk/ulp/wsd/user/ibspdll.h index 7322dbc2..e03a95c9 100644 --- a/trunk/ulp/wsd/user/ibspdll.h +++ b/trunk/ulp/wsd/user/ibspdll.h @@ -42,7 +42,8 @@ #include #include -#include "iba/ib_al.h" +#include +#include #include "ibspdefines.h" #include "ibspdebug.h" @@ -54,4 +55,6 
@@ extern struct ibspdll_globals g_ibsp; +extern uint32_t g_max_inline; + #endif /* IBSPDLL_H */ diff --git a/trunk/ulp/wsd/user/ibspproto.h b/trunk/ulp/wsd/user/ibspproto.h index 8e74c60d..a9b11227 100644 --- a/trunk/ulp/wsd/user/ibspproto.h +++ b/trunk/ulp/wsd/user/ibspproto.h @@ -258,6 +258,12 @@ void ib_deregister_all_mr( IN struct mr_list *mem_list ); +void +ibsp_post_select_event( + struct ibsp_socket_info *socket_info, + int event, + int error ); + /* ibspdll.c */ extern int init_globals( void ); diff --git a/trunk/ulp/wsd/user/ibspstruct.h b/trunk/ulp/wsd/user/ibspstruct.h index 2a76baee..e5d79acd 100644 --- a/trunk/ulp/wsd/user/ibspstruct.h +++ b/trunk/ulp/wsd/user/ibspstruct.h @@ -260,12 +260,10 @@ struct ibsp_socket_info } info; /* Variables associated with IBSPSelectEvent */ - cl_spinlock_t event_mutex; WSAEVENT event_select; /* Handle to Event Object */ long event_mask; /* Events we care about */ long network_events; /* Events that happenned */ int errno_connect; /* errno code (if any) returned by connect */ - int errno_accept; /* errno code (if any) returned by accept */ struct ibsp_socket_options socket_options; /* Socket Options */ diff --git a/trunk/ulp/wsd/user/sockinfo.c b/trunk/ulp/wsd/user/sockinfo.c index ad144137..d008072c 100644 --- a/trunk/ulp/wsd/user/sockinfo.c +++ b/trunk/ulp/wsd/user/sockinfo.c @@ -58,7 +58,6 @@ create_socket_info(void) cl_qlist_init( &socket_info->buf_mem_list.list ); cl_spinlock_init( &socket_info->buf_mem_list.mutex ); - cl_spinlock_init( &socket_info->event_mutex ); cl_qlist_init( &socket_info->info.listen.list ); @@ -93,7 +92,6 @@ free_socket_info( IBSP_ENTER( IBSP_DBG_SI ); cl_spinlock_destroy( &socket_info->buf_mem_list.mutex ); - cl_spinlock_destroy( &socket_info->event_mutex ); cl_spinlock_destroy( &socket_info->mutex ); cl_spinlock_destroy( &socket_info->send_lock );