"AL_OBJ_TYPE_H_CA_ATTR",\r
"AL_OBJ_TYPE_H_PNP_EVENT",\r
"AL_OBJ_TYPE_H_SA_REG",\r
- "AL_OBJ_TYPE_H_FMR"\r
+ "AL_OBJ_TYPE_H_FMR",\r
+ "AL_OBJ_TYPE_H_SRQ",\r
+ "AL_OBJ_TYPE_H_FMR_POOL"\r
};\r
\r
\r
#define AL_OBJ_TYPE_H_SA_REG 39\r
#define AL_OBJ_TYPE_H_FMR 40\r
#define AL_OBJ_TYPE_H_SRQ 41\r
-#define AL_OBJ_TYPE_INVALID 42 /* Must be last type. */\r
+#define AL_OBJ_TYPE_H_FMR_POOL 42\r
+#define AL_OBJ_TYPE_INVALID 43 /* Must be last type. */\r
\r
/* Kernel object for a user-mode app. */\r
#define AL_OBJ_SUBTYPE_UM_EXPORT 0x80000000\r
WPP_DEFINE_BIT( AL_DBG_AL_OBJ) \\r
WPP_DEFINE_BIT( AL_DBG_SMI) \\r
WPP_DEFINE_BIT( AL_DBG_SMI_CB) \\r
- WPP_DEFINE_BIT( AL_DBG_RES1) \\r
+ WPP_DEFINE_BIT( AL_DBG_FMR_POOL) \\r
WPP_DEFINE_BIT( AL_DBG_MAD_POOL) \\r
WPP_DEFINE_BIT( AL_DBG_MAD_SVC) \\r
WPP_DEFINE_BIT( AL_DBG_RES2) \\r
#define AL_DBG_AL_OBJ (1 << 3)\r
#define AL_DBG_SMI (1 << 4)\r
#define AL_DBG_SMI_CB (1 << 5)\r
+#define AL_DBG_FMR_POOL (1 << 6)\r
#define AL_DBG_MAD_POOL (1 << 7)\r
#define AL_DBG_MAD_SVC (1 << 8)\r
#define AL_DBG_CM (1 << 10)\r
{\r
al_obj_t obj;\r
mlnx_fmr_handle_t h_ci_fmr; /* Actual HW handle. */\r
-\r
+ struct _mlnx_fmr* __ptr64 p_next;\r
} mlnx_fmr_t;\r
\r
\r
+\r
cl_status_t\r
mlnx_fmr_ctor(\r
IN void* const p_object,\r
mlnx_fmr_dtor(\r
IN const cl_pool_item_t* const p_pool_item,\r
IN void* context );\r
+\r
+\r
+\r
#endif\r
\r
\r
h_fmr->obj.p_ci_ca->verbs.map_phys_mlnx_fmr( h_fmr->h_ci_fmr,\\r
plist_addr, list_len, p_vaddr, p_lkey, p_rkey )\r
\r
-#define verbs_unmap_mlnx_fmr( h_fmr ) \\r
- h_fmr->obj.p_ci_ca->verbs.unmap_mlnx_fmr( h_fmr->h_ci_fmr)\r
+#define verbs_unmap_mlnx_fmr( h_fmr, p_fmr_array ) \\r
+ h_fmr->obj.p_ci_ca->verbs.unmap_mlnx_fmr( p_fmr_array)\r
\r
#define verbs_destroy_mlnx_fmr( h_fmr ) \\r
h_fmr->obj.p_ci_ca->verbs.dealloc_mlnx_fmr( h_fmr->h_ci_fmr )\r
al_dev.c \\r
al_ioc_pnp.c \\r
al_mad_pool.c \\r
+ al_fmr_pool.c \\r
al_mgr.c \\r
al_mr.c \\r
al_pnp.c \\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+\r
+\r
+\r
+#include "al_debug.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "al_fmr_pool.tmh"\r
+#endif\r
+\r
+#include "al_fmr_pool.h"\r
+#include "al_mr.h"\r
+#include "al_pd.h"\r
+\r
+/* Jenkins-style 96-bit mix step: reversibly scrambles a, b and c together.
+ * Function-like macro - each argument is evaluated many times, so pass
+ * only plain lvalues (no side effects). */
+#define hash_mix(a, b, c) \
+	{ \
+		a -= b; a -= c; a ^= (c>>13); \
+		b -= c; b -= a; b ^= (a<<8); \
+		c -= a; c -= b; c ^= (b>>13); \
+		a -= b; a -= c; a ^= (c>>12); \
+		b -= c; b -= a; b ^= (a<<16); \
+		c -= a; c -= b; c ^= (b>>5); \
+		a -= b; a -= c; a ^= (c>>3); \
+		b -= c; b -= a; b ^= (a<<10); \
+		c -= a; c -= b; c ^= (b>>15); \
+}
+\r
+/* Hash two 32-bit words into one, seeding a and b with the golden-ratio
+ * constant; c is the initial hash value and carries the result. */
+static inline uint32_t hash_2words(uint32_t a, uint32_t b, uint32_t c)
+{
+	a += 0x9e3779b9;
+	b += 0x9e3779b9;
+	hash_mix(a, b, c);
+	return c;
+}
+\r
+/* Pool tuning constants. */
+enum {
+	IB_FMR_MAX_REMAPS = 32,	/* fallback remap limit when the CA does not report max_map_per_fmr */
+
+	IB_FMR_HASH_BITS = 8,
+	IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
+	IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
+};
+\r
+\r
+/* Map the first page address of a mapping to a cache bucket index
+ * in [0, IB_FMR_HASH_SIZE). */
+static inline uint32_t __fmr_hash(uint64_t first_page)
+{
+	return hash_2words((uint32_t) first_page, (uint32_t) (first_page >> 32), 0) &
+		(IB_FMR_HASH_SIZE - 1);
+}
+\r
+/* Caller must hold pool_lock.
+ * Search the cache for an element whose existing mapping is identical to
+ * (page_list, page_list_len, io_virtual_address).  Returns the element,
+ * or NULL when caching is disabled or no identical mapping is cached. */
+static inline mlnx_fmr_pool_element_t *__fmr_cache_lookup(
+	mlnx_fmr_pool_t *p_pool,
+	const uint64_t* const page_list,
+	int page_list_len,
+	uint64_t io_virtual_address)
+{
+	cl_qlist_t *bucket;
+	cl_list_item_t *p_list_item;
+	mlnx_fmr_pool_element_t *p_fmr_el;
+
+	if (!p_pool->cache_bucket)
+		return NULL;	/* pool was created without caching */
+
+	/* bucket chosen by the first page address only; full compare below */
+	bucket = p_pool->cache_bucket + __fmr_hash(*page_list);
+
+	for( p_list_item = cl_qlist_head( bucket );
+		p_list_item != cl_qlist_end( bucket);
+		p_list_item = cl_qlist_next( p_list_item ) )
+	{
+		p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, cache_node );
+		if (io_virtual_address == p_fmr_el->io_virtual_address &&
+			page_list_len == p_fmr_el->page_list_len &&
+			!memcmp(page_list, p_fmr_el->page_list, page_list_len * sizeof *page_list))
+			return p_fmr_el;
+	}
+
+	return NULL;
+}
+\r
+\r
+/* Unmap every FMR on the pool's dirty list in one batch and return the
+ * elements to the free list.  Called from the cleanup thread and from
+ * pool cleanup. */
+static void 
+__fmr_pool_batch_release(mlnx_fmr_pool_t *p_pool)
+{
+	ib_api_status_t status;
+	mlnx_fmr_pool_element_t *p_fmr_el;
+	mlnx_fmr_handle_t h_fmr = NULL;
+	cl_qlist_t unmap_list;
+	cl_list_item_t *p_list_item;
+	cl_qlist_t *bucket;
+
+	cl_qlist_init(&unmap_list);
+	
+	cl_spinlock_acquire(&p_pool->pool_lock);
+
+	/* Under the lock: evict each dirty element from the cache and chain
+	 * its FMR handle through h_fmr->p_next so the whole batch can be
+	 * unmapped with a single mlnx_unmap_fmr() call below. */
+	for( p_list_item = cl_qlist_head( &p_pool->dirty_list );
+		p_list_item != cl_qlist_end( &p_pool->dirty_list);
+		p_list_item = cl_qlist_next( p_list_item ) )
+	{
+		p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item );
+		if (p_fmr_el->in_cash)
+		{
+			p_fmr_el->in_cash = FALSE;
+			bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);
+			cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );
+		}
+		p_fmr_el->remap_count = 0;
+		p_fmr_el->h_fmr->p_next = h_fmr;
+		h_fmr = p_fmr_el->h_fmr;
+		/* a dirty element should never still be referenced - warn only */
+		if (p_fmr_el->ref_count !=0) 
+		{
+			AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("Unmapping FMR 0x%p with ref count %d",
+				p_fmr_el, p_fmr_el->ref_count));
+		}
+	}
+
+	/* Move the whole dirty list aside and reset it before dropping the lock. */
+	cl_qlist_insert_list_head(&unmap_list, &p_pool->dirty_list );
+	cl_qlist_init(&p_pool->dirty_list);
+	p_pool->dirty_len = 0;
+
+	cl_spinlock_release( &p_pool->pool_lock );
+
+	if (cl_is_qlist_empty(&unmap_list)) {
+		return;
+	}
+
+	/* Unmap the whole chain outside the lock (verbs call may be slow). */
+	status = mlnx_unmap_fmr(h_fmr);
+	if (status != IB_SUCCESS)
+		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s", ib_get_err_str(status)));
+
+
+	cl_spinlock_acquire( &p_pool->pool_lock );
+	cl_qlist_insert_list_head(&p_pool->free_list,&unmap_list);
+	cl_spinlock_release( &p_pool->pool_lock );
+}
+\r
+\r
+\r
+/* Pool cleanup thread.  Batch-releases dirty FMRs whenever an explicit
+ * flush was requested or the dirty list reached its watermark, signals
+ * flush waiters, and sleeps on do_flush_event otherwise.  Exits when
+ * should_stop is set by __destroying_fmr_pool. */
+static int 
+__fmr_cleanup_thread(void * p_pool_ptr)
+{
+	mlnx_fmr_pool_t *p_pool = p_pool_ptr;
+	atomic32_t flush_req;
+	int forever = 1;
+
+	do {
+		flush_req = 0;
+		if (p_pool->flush_req || p_pool->dirty_len >= p_pool->dirty_watermark)
+		{
+			__fmr_pool_batch_release(p_pool);
+
+			if (p_pool->flush_req) 
+			{
+				/* wake the caller blocked in mlnx_flush_fmr_pool */
+				cl_event_signal(&p_pool->flush_done_event);
+				flush_req = cl_atomic_dec( &p_pool->flush_req );
+			}
+			
+			if (p_pool->flush_function)
+				p_pool->flush_function( (mlnx_fmr_pool_handle_t)p_pool, p_pool->flush_arg);
+		}
+
+		/* no more pending flush requests: stop or wait for new work */
+		if (!flush_req)
+		{
+			if (p_pool->should_stop)
+				break;
+			cl_event_wait_on(&p_pool->do_flush_event, EVENT_NO_TIMEOUT, TRUE);
+		}
+	} while (forever);
+
+	return 0;
+}
+\r
+/*
+ * Destroying the pool (al_obj "destroying" callback).
+ * Stops the cleanup thread before any resources are torn down.
+ */
+static void
+__destroying_fmr_pool(
+	IN				al_obj_t*					p_obj )
+{
+	mlnx_fmr_pool_t*	p_pool;
+
+	CL_ASSERT( p_obj );
+	p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );
+	/* NOTE(review): TRACE_LEVEL_ERROR for a normal teardown trace looks
+	 * like leftover debugging - confirm intended level. */
+	AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool));
+
+	// notify cleaning thread to exit
+	cl_atomic_inc( &p_pool->should_stop );
+	cl_event_signal(&p_pool->do_flush_event);
+	cl_thread_destroy(&p_pool->thread);
+}
+\r
+/*
+ * Cleanup the pool (al_obj "cleanup" callback).
+ * Flushes the dirty list, merges the in-use ("rest") list into the free
+ * list, then unmaps and destroys every FMR element.
+ */
+static void
+__cleanup_fmr_pool(
+	IN				al_obj_t*					p_obj )
+{
+	int i=0;	/* counts elements actually destroyed */
+	ib_api_status_t status = IB_SUCCESS;
+	mlnx_fmr_pool_t*	p_pool;
+	mlnx_fmr_pool_element_t *p_fmr_el;
+	cl_list_item_t *p_list_item;
+	cl_qlist_t *bucket;
+
+	CL_ASSERT( p_obj );
+	p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );
+	AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool));
+
+	// cleanup the dirty list stuff
+	__fmr_pool_batch_release(p_pool);
+
+	cl_spinlock_acquire(&p_pool->pool_lock);
+
+	// merge the rest with free list
+	/* iteration re-reads the head each time because the current item is
+	 * removed inside the loop body */
+	for( p_list_item = cl_qlist_head( &p_pool->rest_list );
+		p_list_item != cl_qlist_end( &p_pool->rest_list );
+		p_list_item = cl_qlist_head( &p_pool->rest_list ) )
+	{
+		p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item );
+		if (p_fmr_el->in_cash)
+		{
+			p_fmr_el->in_cash = FALSE;
+			bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);
+			cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );
+		}
+		cl_qlist_remove_item(&p_pool->rest_list, p_list_item);
+		cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);
+		p_fmr_el->p_cur_list = &p_pool->free_list;
+	}
+
+	// cleanup the free list
+	for( p_list_item = cl_qlist_head( &p_pool->free_list );
+		p_list_item != cl_qlist_end( &p_pool->free_list );
+		p_list_item = cl_qlist_head( &p_pool->free_list ) )
+	{
+		p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item);
+		/* drop the lock around the (potentially slow) verbs calls */
+		cl_spinlock_release( &p_pool->pool_lock );
+		if (p_fmr_el->remap_count)
+		{
+			/* unmap a single FMR: terminate its batch chain first */
+			p_fmr_el->h_fmr->p_next = NULL;
+			status = mlnx_unmap_fmr(p_fmr_el->h_fmr);
+			if (status != IB_SUCCESS)
+				AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s\n", ib_get_err_str(status)));
+
+		}
+		status = mlnx_destroy_fmr(p_fmr_el->h_fmr);
+		if (status != IB_SUCCESS)
+			AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_destroy_fmr returned %s\n", ib_get_err_str(status)));
+
+		cl_spinlock_acquire(&p_pool->pool_lock);
+		cl_qlist_remove_item(&p_pool->free_list, p_list_item);
+		cl_free(p_fmr_el);
+		++i;
+	}
+
+	cl_spinlock_release( &p_pool->pool_lock );
+
+	if (i < p_pool->pool_size)
+		AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool still has %d regions registered\n",
+			p_pool->pool_size - i));
+}
+\r
+\r
+/*
+ * Free the pool (al_obj "free" callback - last stage of teardown).
+ * Destroys the lock and al_obj, then releases the cache buckets and
+ * the pool structure itself.
+ */
+static void
+__free_fmr_pool(
+	IN				al_obj_t*					p_obj )
+{
+	mlnx_fmr_pool_t*	p_pool;
+
+	CL_ASSERT( p_obj );
+	p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj );
+
+	/* Trace BEFORE freeing: the original printed p_pool after
+	 * cl_free( p_pool ), using the value of a freed pointer. */
+	AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("__free_pool: pool %p\n", p_pool));
+
+	cl_spinlock_destroy(&p_pool->pool_lock);
+	destroy_al_obj( &p_pool->obj );
+	if (p_pool->cache_bucket)
+		cl_free( p_pool->cache_bucket );
+	cl_free( p_pool );
+}
+\r
+\r
+\r
+/*
+ * Create an FMR pool on the given PD.
+ *
+ * Validates the PD and attributes, verifies the CA supports FMRs,
+ * pre-creates pool_size FMRs, optionally allocates the mapping cache,
+ * and registers the pool as an AL object child of the PD.
+ *
+ * Returns IB_SUCCESS and *ph_pool on success; on failure, all partially
+ * created resources are released and an error status is returned.
+ */
+ib_api_status_t
+mlnx_create_fmr_pool(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	mlnx_fmr_pool_create_t		*p_fmr_pool_attr,
+	OUT		mlnx_fmr_pool_handle_t* const		ph_pool )
+{
+	ib_api_status_t		status = IB_SUCCESS;
+	mlnx_fmr_pool_t		*p_pool;
+	int					i;
+	int					max_remaps;
+	cl_status_t			cl_status;
+	mlnx_fmr_pool_element_t *p_fmr_el;
+
+
+	AL_ENTER( AL_DBG_FMR_POOL );
+
+	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
+		status = IB_INVALID_AL_HANDLE;
+		goto end;
+	}
+
+	if( !ph_pool )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		status = IB_INVALID_PARAMETER;
+		goto end;
+	}
+
+	if( !p_fmr_pool_attr || !p_fmr_pool_attr->dirty_watermark)
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		status = IB_INVALID_PARAMETER;
+		goto end;
+	}
+
+	if (!h_pd->obj.p_ci_ca || !h_pd->obj.p_ci_ca->p_pnp_attr) 
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_STATE\n") );
+		status = IB_INVALID_STATE;
+		goto end;
+	}
+	
+	// check whether the device support FMR
+	if (!h_pd->obj.p_ci_ca->verbs.alloc_mlnx_fmr|| !h_pd->obj.p_ci_ca->verbs.dealloc_mlnx_fmr ||
+		!h_pd->obj.p_ci_ca->verbs.map_phys_mlnx_fmr || !h_pd->obj.p_ci_ca->verbs.unmap_mlnx_fmr) {
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Device does not support fast memory regions"));
+		status = IB_UNSUPPORTED;
+		goto end;
+	}
+
+	/* use the device limit when reported, else a conservative default */
+	if (!h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr)
+	{
+		max_remaps = IB_FMR_MAX_REMAPS;
+	}
+	else
+	{
+		max_remaps = h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr;
+	}
+
+	// allocate pool object
+	p_pool = cl_zalloc( sizeof( mlnx_fmr_pool_t ) );
+	if( !p_pool )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Couldn't allocate pool struct"));
+		status = IB_INSUFFICIENT_MEMORY;
+		goto err_alloc_pool_obj;
+	}
+
+	// construct pool objects
+	cl_spinlock_construct( &p_pool->pool_lock);
+	cl_thread_construct(&p_pool->thread);
+	cl_event_construct(&p_pool->do_flush_event);
+	cl_event_construct(&p_pool->flush_done_event);
+
+
+	// init pool objects
+	p_pool->pool_size = 0;
+	p_pool->max_pages = p_fmr_pool_attr->max_pages_per_fmr;
+	p_pool->max_remaps = max_remaps;
+	p_pool->dirty_watermark = p_fmr_pool_attr->dirty_watermark;
+	p_pool->dirty_len = 0;
+	p_pool->cache_bucket = NULL;
+	p_pool->flush_function = p_fmr_pool_attr->flush_function;
+	p_pool->flush_arg = p_fmr_pool_attr->flush_arg;
+	cl_qlist_init(&p_pool->dirty_list);
+	cl_qlist_init(&p_pool->free_list);
+	cl_qlist_init(&p_pool->rest_list);
+
+	if (p_fmr_pool_attr->cache) {
+		p_pool->cache_bucket =
+			cl_zalloc(IB_FMR_HASH_SIZE * sizeof *p_pool->cache_bucket);
+		if (!p_pool->cache_bucket) {
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate cache in pool"));
+			status = IB_INSUFFICIENT_MEMORY;
+			goto err_alloc_cache;
+		}
+
+		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
+			cl_qlist_init(p_pool->cache_bucket + i);
+	}
+
+	cl_status = cl_spinlock_init( &p_pool->pool_lock );
+	if( cl_status != CL_SUCCESS ) 
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_spinlock_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	/* BUGFIX: the return values of the two cl_event_init calls and of
+	 * cl_thread_init were previously discarded, so the checks below
+	 * tested the stale spinlock-init status and failures went unnoticed. */
+	cl_status = cl_event_init(&p_pool->do_flush_event,FALSE);
+	if( cl_status != CL_SUCCESS ) 
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	cl_status = cl_event_init(&p_pool->flush_done_event,FALSE);
+	if( cl_status != CL_SUCCESS ) 
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	cl_status = cl_thread_init(&p_pool->thread ,__fmr_cleanup_thread,p_pool,"fmr_cleanup");
+	if( cl_status != CL_SUCCESS ) 
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_thread_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	{
+		mlnx_fmr_create_t fmr_attr;
+		
+		fmr_attr.max_pages = p_fmr_pool_attr->max_pages_per_fmr;
+		fmr_attr.max_maps = p_pool->max_remaps;
+		fmr_attr.page_size = p_fmr_pool_attr->page_size;
+		fmr_attr.access_ctrl = p_fmr_pool_attr->access_ctrl;
+
+
+		/* pre-create all FMRs; any failure tears down the ones done so far */
+		for (i = 0; i < p_fmr_pool_attr->pool_size; ++i)
+		{
+			p_fmr_el = cl_zalloc(sizeof (mlnx_fmr_pool_element_t) + p_fmr_pool_attr->max_pages_per_fmr * sizeof (uint64_t));
+			if (!p_fmr_el)
+			{
+				AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, (" failed to allocate struct for FMR %d \n",i));
+				status = IB_INSUFFICIENT_MEMORY;
+				goto err_alloc_cache_el;
+			}
+
+			p_fmr_el->h_pool = (mlnx_fmr_pool_handle_t)p_pool;
+			p_fmr_el->remap_count = 0;
+			p_fmr_el->ref_count = 0;
+
+			status = mlnx_create_fmr(h_pd, &fmr_attr,&p_fmr_el->h_fmr);
+			if (status != IB_SUCCESS)
+			{
+				AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+					("mlnx_create_fmr failed for FMR %d with status %s.\n",i,ib_get_err_str(status)));
+				cl_free(p_fmr_el);
+				goto err_alloc_cache_el;
+			}
+
+			cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);
+			p_fmr_el->p_cur_list = &p_pool->free_list;
+			++p_pool->pool_size;
+		}
+
+	}
+
+	/* Do IBAL stuff for creating and initializing the object */
+	construct_al_obj( &p_pool->obj, AL_OBJ_TYPE_H_FMR_POOL);
+
+	status = init_al_obj( &p_pool->obj, p_pool, FALSE, __destroying_fmr_pool, __cleanup_fmr_pool, __free_fmr_pool );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
+		goto err_init_al_obj;
+	}
+
+	/* Attach the pool to the AL object. */
+	status = attach_al_obj( &h_pd->obj, &p_pool->obj );
+	if( status != IB_SUCCESS )
+	{
+		ref_al_obj( &p_pool->obj );
+		p_pool->obj.pfn_destroy( &p_pool->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		goto end;
+	}
+
+
+	/* Release the reference taken in init_al_obj */
+	deref_al_obj( &p_pool->obj );
+
+	*ph_pool = p_pool;
+	status = IB_SUCCESS;
+	goto end;
+
+err_init_al_obj:
+	destroy_al_obj( &p_pool->obj );
+
+err_alloc_cache_el:
+	/* stop the cleanup thread and destroy any FMRs created so far */
+	__destroying_fmr_pool( &p_pool->obj );
+	__cleanup_fmr_pool( &p_pool->obj );
+
+err_pool_init:
+	if (p_pool->cache_bucket)
+		cl_free( p_pool->cache_bucket );
+
+err_alloc_cache:	
+	cl_free( p_pool );
+
+err_alloc_pool_obj:
+end:
+	AL_EXIT( AL_DBG_FMR_POOL );
+	return status;
+}
+\r
+/**
+ * mlnx_destroy_fmr_pool - Free FMR pool
+ * @h_pool: FMR pool to free
+ *
+ * Destroy an FMR pool and free all associated resources.
+ */
+ib_api_status_t
+mlnx_destroy_fmr_pool(
+	IN		const	mlnx_fmr_pool_handle_t		h_pool)
+{
+	mlnx_fmr_pool_t		*p_pool = (mlnx_fmr_pool_t*)h_pool;
+
+	AL_ENTER( AL_DBG_FMR_POOL );
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* take a reference for the destroy path; teardown proceeds through
+	 * __destroying_fmr_pool / __cleanup_fmr_pool / __free_fmr_pool */
+	ref_al_obj( &p_pool->obj );
+	p_pool->obj.pfn_destroy( &p_pool->obj, NULL );
+
+	AL_EXIT( AL_DBG_FMR_POOL );
+	return IB_SUCCESS;
+}
+\r
+\r
+\r
+/* Request a synchronous flush of the pool's dirty list: wakes the
+ * cleanup thread and blocks until it signals flush_done_event.
+ * Returns IB_ERROR if the wait fails. */
+ib_api_status_t
+mlnx_flush_fmr_pool(mlnx_fmr_pool_handle_t h_pool)
+{
+
+	ib_api_status_t status = IB_SUCCESS;
+	mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool;
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	ref_al_obj( &p_pool->obj );
+
+	/* flush_req is decremented by the cleanup thread once it has flushed */
+	cl_atomic_inc( &p_pool->flush_req );
+	cl_event_signal(&p_pool->do_flush_event);
+	if (cl_event_wait_on(&p_pool->flush_done_event, EVENT_NO_TIMEOUT, TRUE))
+		status = IB_ERROR;
+
+	deref_al_obj( &p_pool->obj );
+
+	return status;
+}
+\r
+/* Map a physical page list through a pool FMR.
+ *
+ * First tries the cache for an identical existing mapping (cache hit:
+ * just bumps ref_count); otherwise takes an element off the free list,
+ * maps it via mlnx_map_phys_fmr, moves it to the rest list, and (if
+ * caching is enabled) inserts it into the cache.
+ *
+ * On success returns the element in *pp_fmr_el and the mapping's
+ * lkey/rkey/vaddr through the OUT parameters.  Returns IB_RESOURCE_BUSY
+ * when no free FMR is available. */
+ib_api_status_t
+mlnx_map_phys_fmr_pool(
+	IN		const	mlnx_fmr_pool_handle_t		h_pool ,
+	IN		const	uint64_t* const				page_list,
+	IN		const	int							list_len,
+	IN	OUT			uint64_t* const				p_vaddr,
+	OUT			net32_t* const				p_lkey,
+	OUT			net32_t* const				p_rkey,
+	OUT			mlnx_fmr_pool_el_t			*pp_fmr_el)
+{
+
+	ib_api_status_t status = IB_SUCCESS;
+	mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool;
+	mlnx_fmr_pool_element_t *p_fmr_el;
+	cl_qlist_t *bucket;
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	if (list_len < 1 || list_len > p_pool->max_pages)
+		return IB_INVALID_PARAMETER;
+
+	ref_al_obj( &p_pool->obj );
+
+	cl_spinlock_acquire(&p_pool->pool_lock);
+
+	p_fmr_el = __fmr_cache_lookup( p_pool, page_list, list_len, *p_vaddr );
+	if (p_fmr_el) {
+		/* found in cache */
+		++p_fmr_el->ref_count;
+		if (p_fmr_el->ref_count == 1) {
+			/* revived from free/dirty list - move it to the in-use list */
+			cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item );
+			cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item);
+			p_fmr_el->p_cur_list = &p_pool->rest_list;
+		}
+
+		cl_spinlock_release(&p_pool->pool_lock);
+		goto end;
+	}
+	
+	if (cl_is_qlist_empty(&p_pool->free_list)) {
+		cl_spinlock_release(&p_pool->pool_lock);
+		status = IB_RESOURCE_BUSY;
+		goto exit;
+	}
+
+	/* take a free element; evict its stale cache entry, if any */
+	p_fmr_el = PARENT_STRUCT(cl_qlist_remove_head(&p_pool->free_list),mlnx_fmr_pool_element_t,list_item);
+	if (p_fmr_el->in_cash)
+	{
+		p_fmr_el->in_cash = FALSE;
+		bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);
+		cl_qlist_remove_item( bucket, &p_fmr_el->cache_node );
+	}
+	cl_spinlock_release(&p_pool->pool_lock);
+
+	/* verbs mapping is done outside the lock */
+	status = mlnx_map_phys_fmr(p_fmr_el->h_fmr, page_list,
+		list_len, p_vaddr, p_lkey, p_rkey);
+
+	if (status != IB_SUCCESS) {
+		/* mapping failed - return the element to the free list */
+		cl_spinlock_acquire(&p_pool->pool_lock);
+		cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);
+		p_fmr_el->p_cur_list = &p_pool->free_list;
+		cl_spinlock_release(&p_pool->pool_lock);
+		goto exit;
+	}
+
+	++p_fmr_el->remap_count;
+	p_fmr_el->ref_count = 1;
+	p_fmr_el->lkey = *p_lkey;
+	p_fmr_el->rkey = *p_rkey;
+	p_fmr_el->io_virtual_address = *p_vaddr;
+	cl_spinlock_acquire(&p_pool->pool_lock);
+	cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item);
+	p_fmr_el->p_cur_list = &p_pool->rest_list;
+	cl_spinlock_release(&p_pool->pool_lock);
+
+	if (p_pool->cache_bucket) {
+		/* remember the mapping key so identical requests hit the cache */
+		p_fmr_el->io_virtual_address = *p_vaddr;
+		p_fmr_el->page_list_len = list_len;
+		memcpy(p_fmr_el->page_list, page_list, list_len * sizeof(*page_list));
+
+		cl_spinlock_acquire(&p_pool->pool_lock);
+		bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]);
+		cl_qlist_insert_head( bucket, &p_fmr_el->cache_node );
+		p_fmr_el->in_cash = TRUE;
+		cl_spinlock_release(&p_pool->pool_lock);
+	}
+
+end:
+	*pp_fmr_el = (mlnx_fmr_pool_el_t)p_fmr_el;
+	*p_lkey = p_fmr_el->lkey;
+	*p_rkey = p_fmr_el->rkey;
+	*p_vaddr = p_fmr_el->io_virtual_address;
+	
+exit:
+	deref_al_obj( &p_pool->obj );
+	return status;
+}
+\r
+\r
+\r
+/* Release a mapping obtained from mlnx_map_phys_fmr_pool.
+ *
+ * Decrements the element's ref_count; when it reaches zero the element
+ * moves from the rest list to the free list (still remappable) or to the
+ * dirty list (remap limit reached - cleanup thread will unmap it).
+ * The element stays in the cache either way, so the mapping can be
+ * revived until the FMR is actually remapped. */
+ib_api_status_t
+mlnx_unmap_fmr_pool(
+	IN			mlnx_fmr_pool_el_t			p_fmr_el )
+{
+	mlnx_fmr_pool_t		*p_pool;
+
+	p_pool = (mlnx_fmr_pool_t*)p_fmr_el->h_pool;
+
+	if( AL_OBJ_INVALID_HANDLE( (mlnx_fmr_pool_handle_t)p_pool, AL_OBJ_TYPE_H_FMR_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	ref_al_obj( &p_pool->obj );
+	
+	cl_spinlock_acquire(&p_pool->pool_lock);
+
+	--p_fmr_el->ref_count;
+	if (!p_fmr_el->ref_count) 
+	{
+		if (p_fmr_el->p_cur_list == &p_pool->rest_list)
+			cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item );
+
+		if (p_fmr_el->remap_count < p_pool->max_remaps) 
+		{
+			cl_qlist_insert_tail(&p_pool->free_list,&p_fmr_el->list_item);
+			p_fmr_el->p_cur_list = &p_pool->free_list;
+		}
+		else
+		{
+			/* remap limit reached: queue for batch unmap and wake cleaner */
+			cl_qlist_insert_tail(&p_pool->dirty_list, &p_fmr_el->list_item);
+			p_fmr_el->p_cur_list = &p_pool->dirty_list;
+			++p_pool->dirty_len;
+			cl_event_signal(&p_pool->do_flush_event);
+		}
+	}
+
+	/* negative ref_count indicates an unmap without a matching map */
+	if (p_fmr_el->ref_count < 0)
+	{
+		AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("FMR %p has ref count %d < 0\n",p_fmr_el, p_fmr_el->ref_count));
+	}
+	cl_spinlock_release( &p_pool->pool_lock );
+
+	deref_al_obj( &p_pool->obj );
+	return IB_SUCCESS;
+}
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $\r
+ */\r
+\r
+\r
+#if !defined(__AL_FMR_POOL_H__)\r
+#define __AL_FMR_POOL_H__\r
+\r
+#include <complib/cl_qlist.h>\r
+#include <iba/ib_al.h>\r
+#include "al_common.h"\r
+\r
+\r
+/*\r
+ * If an FMR is not in use, then the list member will point to either\r
+ * its pool's free_list (if the FMR can be mapped again; that is,\r
+ * remap_count < pool->max_remaps) or its pool's dirty_list (if the\r
+ * FMR needs to be unmapped before being remapped). In either of\r
+ * these cases it is a bug if the ref_count is not 0. In other words,\r
+ * if ref_count is > 0, then the list member must not be linked into\r
+ * either free_list or dirty_list.\r
+ *\r
+ * The cache_node member is used to link the FMR into a cache bucket\r
+ * (if caching is enabled). This is independent of the reference\r
+ * count of the FMR. When a valid FMR is released, its ref_count is\r
+ * decremented, and if ref_count reaches 0, the FMR is placed in\r
+ * either free_list or dirty_list as appropriate. However, it is not\r
+ * removed from the cache and may be "revived" if a call to\r
+ * mlnx_map_phys_fmr_pool() occurs before the FMR is remapped. In\r
+ * this case we just increment the ref_count and remove the FMR from\r
+ * free_list/dirty_list.\r
+ *\r
+ * Before we remap an FMR from free_list, we remove it from the cache\r
+ * (to prevent another user from obtaining a stale FMR). When an FMR\r
+ * is released, we add it to the tail of the free list, so that our\r
+ * cache eviction policy is "least recently used."\r
+ *\r
+ * All manipulation of ref_count, list and cache_node is protected by\r
+ * pool_lock to maintain consistency.\r
+ */\r
+\r
+/* One pooled FMR and its bookkeeping; page_list is a trailing
+ * variable-length array sized at allocation (max_pages_per_fmr). */
+#pragma warning( disable : 4200)	/* nonstandard extension: zero-sized array */
+typedef struct _mlnx_fmr_pool_element {
+	mlnx_fmr_handle_t h_fmr;	/* underlying AL FMR handle */
+	mlnx_fmr_pool_handle_t h_pool;	/* owning pool */
+	cl_list_item_t list_item;	/* linkage in free/dirty/rest list */
+	cl_qlist_t *p_cur_list;	/* which of the three lists list_item is on */
+	cl_list_item_t cache_node;	/* linkage in a cache hash bucket */
+	boolean_t in_cash;	/* TRUE while linked into cache_node's bucket */
+	int ref_count;	/* active users of the current mapping */
+	int remap_count;	/* maps since last unmap; >= max_remaps => dirty */
+	uint64_t io_virtual_address;	/* cache key: mapped virtual address */
+	net32_t lkey;	/* keys of the current mapping */
+	net32_t rkey;
+	int page_list_len;	/* cache key: number of valid entries below */
+	uint64_t page_list[0];	/* cache key: mapped physical page addresses */
+} mlnx_fmr_pool_element_t;
+#pragma warning( default : 4200)
+\r
+\r
+/* FMR pool state.  pool_lock protects the three lists, the cache
+ * buckets, and per-element ref_count/list membership (see the comment
+ * block above). */
+typedef struct _mlnx_fmr_pool {
+
+	al_obj_t obj;			/* Child of ib_al_handle_t */
+	cl_spinlock_t pool_lock;
+
+	int pool_size;	/* number of FMRs actually created */
+	int max_pages;	/* max pages per single mapping */
+	int max_remaps;	/* remaps allowed before an FMR must be unmapped */
+	int dirty_watermark;	/* dirty_len that triggers a batch release */
+	int dirty_len;
+	cl_qlist_t free_list;	/* unmapped or remappable, ref_count == 0 */
+	cl_qlist_t dirty_list;	/* awaiting batch unmap, ref_count == 0 */
+	cl_qlist_t rest_list;	/* those, that not in free and not in dirty */
+	cl_qlist_t *cache_bucket;	/* IB_FMR_HASH_SIZE buckets, or NULL */
+
+	/* optional user callback invoked after each batch release */
+	void (*flush_function) (mlnx_fmr_pool_handle_t h_pool,void* arg);
+	void *flush_arg;
+
+	cl_thread_t thread;	/* cleanup thread (__fmr_cleanup_thread) */
+	cl_event_t do_flush_event;	/* wakes the cleanup thread */
+	cl_event_t flush_done_event;	/* signals mlnx_flush_fmr_pool waiters */
+	atomic32_t flush_req;	/* pending explicit flush requests */
+	atomic32_t should_stop;	/* nonzero => cleanup thread must exit */
+} mlnx_fmr_pool_t;
+\r
+\r
+#endif /* __AL_FMR_POOL_H__ */\r
+\r
CL_ASSERT( status == IB_SUCCESS );\r
\r
h_fmr->h_ci_fmr = NULL;\r
+ h_fmr->p_next = NULL;\r
}\r
}\r
\r
IN const mlnx_fmr_handle_t h_fmr )\r
{\r
ib_api_status_t status;\r
-\r
+ mlnx_fmr_t *p_fmr = (mlnx_fmr_t*)h_fmr;\r
+ mlnx_fmr_t *p_cur_fmr;\r
+ mlnx_fmr_handle_t *p_fmr_array;\r
+ int i;\r
+ \r
AL_ENTER( AL_DBG_MR );\r
\r
if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )\r
return IB_INVALID_FMR_HANDLE;\r
}\r
\r
- ref_al_obj( &h_fmr->obj );\r
- status = verbs_unmap_mlnx_fmr( h_fmr );\r
- deref_al_obj( &h_fmr->obj );\r
+ // calculate the list size\r
+ for ( i=0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
+ i++;\r
+ \r
+ // allocate the array\r
+ p_fmr_array = cl_zalloc((i+1)*sizeof(mlnx_fmr_handle_t));\r
+ if (!p_fmr_array)\r
+ {\r
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ // fill the array\r
+ for ( i=0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
+ {\r
+ p_fmr_array[i++] = p_cur_fmr->h_ci_fmr;\r
+ ref_al_obj( &p_cur_fmr->obj );\r
+ }\r
+ p_fmr_array[i] = NULL;\r
+\r
+ // unmap the array of FMRs\r
+ status = verbs_unmap_mlnx_fmr( h_fmr, p_fmr_array );\r
+\r
+ // deref the objects\r
+ for ( p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next)\r
+ deref_al_obj( &p_cur_fmr->obj );\r
\r
+ cl_free( p_fmr_array );\r
+ \r
AL_EXIT( AL_DBG_MR );\r
return status;\r
}\r
h_fmr->h_ci_fmr = NULL;\r
/* We're good to destroy the object. \r
NOTE: No need to deref the al object , \r
- we are reseting the fmr objb before inserting it back to the pool */\r
+ we are resetting the fmr obj before inserting it back to the pool */\r
\r
h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );\r
}\r
\r
cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
ib_smp_get_payload_ptr( p_mad ),\r
- sizeof(ib_pkey_table_info_t) );\r
+ sizeof(ib_pkey_table_t) );\r
p_cache->pkey_tbl[idx].valid = TRUE;\r
}\r
\r
if( p_mad->method == IB_MAD_METHOD_SET )\r
{\r
if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_pkey_table_info_t) ) )\r
+ &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ) )\r
{\r
/* The set is requesting a change. */\r
return IB_NOT_DONE;\r
ib_mad_t *p_mad;\r
ib_mad_element_t *p_mad_resp;\r
ib_smp_t *p_smp;\r
- ib_pkey_table_info_t *p_pkey_table_info;\r
+ ib_pkey_table_t *p_pkey_table;\r
uint16_t idx;\r
ib_api_status_t status;\r
\r
if( p_mad->method == IB_MAD_METHOD_SET )\r
{\r
if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ),\r
- &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) ) )\r
+ &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) )\r
{\r
/* The set is requesting a change. */\r
AL_EXIT( AL_DBG_SMI );\r
else\r
p_smp->status = 0;\r
\r
- p_pkey_table_info = (ib_pkey_table_info_t*)ib_smp_get_payload_ptr( p_smp );\r
+ p_pkey_table = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp );\r
\r
// TODO: do we need lock on the cache ?????\r
\r
\r
/* Copy the cached data. */\r
- cl_memcpy( p_pkey_table_info,\r
- &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) );\r
+ cl_memcpy( p_pkey_table,\r
+ &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) );\r
\r
status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp );\r
}\r
typedef struct _pkey_block\r
{\r
boolean_t valid;\r
- ib_pkey_table_info_t tbl;\r
+ ib_pkey_table_t tbl;\r
\r
} pkey_block_t;\r
\r
ib_rmpp_set_resp_time\r
ib_sa_mad_get_payload_ptr\r
ib_send_mad\r
-ib_slvl_table_get_vl\r
-ib_slvl_table_set_vl\r
+ib_slvl_table_set\r
+ib_slvl_table_get\r
ib_sminfo_get_priority\r
ib_sminfo_get_state\r
ib_smp_get_payload_ptr\r
p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr;\r
p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr;\r
p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr;\r
+ p_ifc->create_mlnx_fmr_pool = mlnx_create_fmr_pool;\r
+ p_ifc->destroy_mlnx_fmr_pool = mlnx_destroy_fmr_pool;\r
+ p_ifc->map_phys_mlnx_fmr_pool = mlnx_map_phys_fmr_pool;\r
+ p_ifc->unmap_mlnx_fmr_pool = mlnx_unmap_fmr_pool;\r
+ p_ifc->flush_mlnx_fmr_pool = mlnx_flush_fmr_pool;\r
p_ifc->create_srq = ib_create_srq;\r
p_ifc->modify_srq = ib_modify_srq;\r
p_ifc->query_srq = ib_query_srq;\r
ServiceBinary = %12%\ibbus.sys\r
LoadOrderGroup = extended base\r
AddReg = Ibbus.ParamsReg\r
+Dependencies = mthca\r
\r
[Iou.ServiceInstall]\r
DisplayName = %Iou.ServiceDesc%\r
typedef struct _pkey_block\r
{\r
boolean_t valid;\r
- ib_pkey_table_info_t tbl;\r
+ ib_pkey_table_t tbl;\r
\r
} mlnx_pkey_block_t;\r
\r
\r
ib_api_status_t\r
mlnx_unmap_fmr (\r
- IN const mlnx_fmr_handle_t h_fmr)\r
+ IN const mlnx_fmr_handle_t *ph_fmr)\r
{\r
- UNUSED_PARAM( h_fmr );\r
+ UNUSED_PARAM( ph_fmr );\r
return IB_UNSUPPORTED;\r
}\r
\r
if( p_mad_in->method == IB_MAD_METHOD_SET )\r
{\r
if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ),\r
- &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) ) )\r
+ &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) )\r
{\r
/* The set is requesting a change. */\r
return FALSE;\r
\r
/* Copy the cached data. */\r
cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
- &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_info_t) );\r
+ &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) );\r
\r
return TRUE;\r
}\r
\r
cl_memcpy( &p_cache->pkey_tbl[idx].tbl,\r
ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ),\r
- sizeof(ib_pkey_table_info_t) );\r
+ sizeof(ib_pkey_table_t) );\r
p_cache->pkey_tbl[idx].valid = TRUE;\r
}\r
\r
extern uint32_t g_tune_pci;\r
extern uint32_t g_processor_affinity;\r
extern uint32_t g_max_DPC_time_us;\r
+extern uint32_t g_profile_qp_num;\r
+extern uint32_t g_profile_rd_out;\r
\r
\r
#define MLNX_MAX_HCA 4\r
typedef struct _pkey_block\r
{\r
boolean_t valid;\r
- ib_pkey_table_info_t tbl;\r
+ ib_pkey_table_t tbl;\r
\r
} mlnx_pkey_block_t;\r
\r
uint32_t g_tune_pci=0; /* 0 - skip tuning PCI configuration space of HCAs */\r
uint32_t g_processor_affinity = 0;\r
uint32_t g_max_DPC_time_us = 10000;\r
+uint32_t g_profile_qp_num = 0;\r
+uint32_t g_profile_rd_out = 0xffffffff;\r
\r
UNICODE_STRING g_param_path;\r
\r
{\r
NTSTATUS status;\r
/* Remember the terminating entry in the table below. */\r
- RTL_QUERY_REGISTRY_TABLE table[8];\r
+ RTL_QUERY_REGISTRY_TABLE table[10];\r
\r
HCA_ENTER( HCA_DBG_DEV );\r
\r
table[6].DefaultData = &g_max_DPC_time_us;\r
table[6].DefaultLength = sizeof(ULONG);\r
\r
+ table[7].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+ table[7].Name = L"ProfileQpNum";\r
+ table[7].EntryContext = &g_profile_qp_num;\r
+ table[7].DefaultType = REG_DWORD;\r
+ table[7].DefaultData = &g_profile_qp_num;\r
+ table[7].DefaultLength = sizeof(ULONG);\r
+\r
+ table[8].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+ table[8].Name = L"ProfileRdOut";\r
+ table[8].EntryContext = &g_profile_rd_out;\r
+ table[8].DefaultType = REG_DWORD;\r
+ table[8].DefaultData = &g_profile_rd_out;\r
+ table[8].DefaultLength = sizeof(ULONG);\r
+\r
/* Have at it! */\r
status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
g_param_path.Buffer, table, NULL, NULL );\r
\r
HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_INIT, \r
- ("debug level %d debug flags 0x%.8x SkipTavorReset %d DisableTavorReset %d TunePci %d"\r
- "g_processor_affinity %d g_max_DPC_time_us%d\n",\r
- g_mthca_dbg_level , g_mthca_dbg_flags,\r
+ ("debug level %d debug flags 0x%.8x SkipTavorReset %d DisableTavorReset %d TunePci %d"\r
+ "g_processor_affinity %d g_max_DPC_time_us %d g_profile_qp_num %d g_profile_rd_out %d\n",\r
+ g_mthca_dbg_level, g_mthca_dbg_flags,\r
g_skip_tavor_reset, g_disable_tavor_reset,\r
- g_tune_pci, g_processor_affinity, g_max_DPC_time_us ));\r
+ g_tune_pci, g_processor_affinity, g_max_DPC_time_us,\r
+ g_profile_qp_num, g_profile_rd_out ));\r
\r
HCA_EXIT( HCA_DBG_DEV );\r
return status;\r
// resource tracking\r
p_context->p_mdl = p_mdl;\r
p_context->va = ua;\r
- \r
+\r
+#if 0 \r
+ HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,\r
+ ("MTHCA: __map_crspace succeeded with .ka %I64x, size %I64x va %I64x, size %x, pa %I64x \n",\r
+ p_ext->bar[HCA_BAR_TYPE_HCR].virt, p_ext->bar[HCA_BAR_TYPE_HCR].size, \r
+ p_res->va, p_res->size, p_ext->bar[HCA_BAR_TYPE_HCR].phys ));\r
+#endif\r
status = STATUS_SUCCESS;\r
goto out;\r
\r
status = IB_INVALID_PARAMETER;\r
goto err_invalid_parm; \r
}\r
- //TODO: temporary limitation, till implementing somewhat like Gen2's FMR_POOL\r
- if (p_fmr_create->max_maps != 1) {\r
- status = IB_INVALID_PARAMETER;\r
- goto err_invalid_parm; \r
- }\r
+ // TODO: check Max remap in AL\r
\r
// prepare parameters\r
RtlZeroMemory(&fmr_attr, sizeof(struct ib_fmr_attr));\r
\r
ib_api_status_t\r
mlnx_unmap_fmr (\r
- IN const mlnx_fmr_handle_t h_fmr)\r
+ IN const mlnx_fmr_handle_t *ph_fmr)\r
{\r
ib_api_status_t status;\r
int err;\r
- struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr;\r
+ struct ib_fmr *ib_fmr = (struct ib_fmr *)*ph_fmr;\r
struct list_head fmr_list;\r
PREP_IBDEV_FOR_PRINT(ib_fmr->device);\r
\r
status = IB_UNSUPPORTED;\r
goto err_unsupported;\r
} \r
- \r
- // deregister \r
+\r
INIT_LIST_HEAD(&fmr_list);\r
- list_add_tail(&ib_fmr->list, &fmr_list);\r
+ while(*ph_fmr)\r
+ {\r
+ ib_fmr = (struct ib_fmr*)*ph_fmr;\r
+ list_add_tail(&ib_fmr->list, &fmr_list);\r
+ ph_fmr ++;\r
+ }\r
+\r
err = ibv_unmap_fmr(&fmr_list);\r
if (err) {\r
status = errno_to_iberr(err);\r
HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,\r
- ("ibv_unmap_fmr failed (%d) for fmr %p\n", err, h_fmr));\r
+ ("ibv_unmap_fmr failed (%d) \n", err));\r
goto err_unmap_fmr;\r
}\r
\r
err = ibv_dealloc_fmr((struct ib_fmr *)h_fmr);\r
if (err) {\r
status = errno_to_iberr(err);\r
- HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,\r
- ("ibv_dealloc_fmr failed (%d) for mr %p\n", err, h_fmr));\r
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY ,\r
+ ("ibv_dealloc_fmr failed (%d) for mr %p\n",err, h_fmr));\r
goto err_dealloc_fmr;\r
}\r
\r
p_interface->deregister_mr = mlnx_deregister_mr;\r
}\r
\r
+\r
[MTHCA.DDInstall.ntx86]\r
CopyFiles = MTHCA.CopyFiles\r
CopyFiles = MTHCA.UMCopyFiles\r
+CopyINF=ib_bus.inf\r
\r
[MTHCA.DDInstall.ntamd64]\r
CopyFiles = MTHCA.CopyFiles\r
CopyFiles = MTHCA.UMCopyFiles\r
CopyFiles = MTHCA.WOW64CopyFiles\r
+CopyINF=ib_bus.inf\r
\r
[MTHCA.DDInstall.ntia64]\r
CopyFiles = MTHCA.CopyFiles\r
CopyFiles = MTHCA.UMCopyFiles\r
CopyFiles = MTHCA.WOW64CopyFiles\r
+CopyINF=ib_bus.inf\r
\r
[MTHCA.DDInstall.ntx86.Services]\r
AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog\r
HKR,"Parameters","TunePci",%REG_DWORD%,0\r
HKR,"Parameters","ProcessorAffinity",%REG_DWORD%,0\r
HKR,"Parameters","MaxDpcTimeUs",%REG_DWORD%,10000\r
+HKR,"Parameters","ProfileQpNum",%REG_DWORD%,0\r
+HKR,"Parameters","ProfileRdOut",%REG_DWORD%,0xffffffff\r
HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Flags",%REG_DWORD%,0xffff\r
HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Level",%REG_DWORD%,0x3\r
\r
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.uarc_size = 0;
+
+ /* correct default profile */
+ if ( g_profile_qp_num != 0 )
+ profile.num_qp = g_profile_qp_num;
+
+ if ( g_profile_rd_out != 0xffffffff )
+ profile.rdb_per_qp = g_profile_rd_out;
+
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
profile.num_srq = dev_lim.max_srqs;
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
profile.num_srq = dev_lim.max_srqs;
+ /* correct default profile */
+ if ( g_profile_qp_num != 0 )
+ profile.num_qp = g_profile_qp_num;
+
+ if ( g_profile_rd_out != 0xffffffff )
+ profile.rdb_per_qp = g_profile_rd_out;
+
RtlZeroMemory( &init_hca, sizeof(init_hca));
icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if ((int) icm_size < 0) {
}
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- mthca_mpt_access_t access, struct mthca_fmr *mr)
+ mthca_mpt_access_t access, struct mthca_fmr *fmr)
{
struct mthca_mpt_entry *mpt_entry;
struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
u8 status;
- int list_len = mr->attr.max_pages;
+ int list_len = fmr->attr.max_pages;
int err = -ENOMEM;
int i;
CPU_2_BE64_PREP;
- if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
+ if (fmr->attr.page_shift < 12 || fmr->attr.page_shift >= 32)
return -EINVAL;
/* For Arbel, all MTTs must fit in the same page. */
if (mthca_is_memfree(dev) &&
- mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
+ fmr->attr.max_pages * sizeof *fmr->mem.arbel.mtts > PAGE_SIZE)
return -EINVAL;
- mr->maps = 0;
+ fmr->maps = 0;
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
key = adjust_key(dev, key);
idx = key & (dev->limits.num_mpts - 1);
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
+ fmr->ibfmr.rkey = fmr->ibfmr.lkey = hw_index_to_key(dev, key);
if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
- mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
- BUG_ON(!mr->mem.arbel.mpt);
+ fmr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
+ BUG_ON(!fmr->mem.arbel.mpt);
} else
- mr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx);
+ fmr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base +
+ sizeof *(fmr->mem.tavor.mpt) * idx);
- mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt))
+ fmr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
+ if (IS_ERR(fmr->mtt))
goto err_out_table;
- mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+ mtt_seg =fmr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
if (mthca_is_memfree(dev)) {
- mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->mtt->first_seg);
- BUG_ON(!mr->mem.arbel.mtts);
+ fmr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
+ fmr->mtt->first_seg);
+ BUG_ON(!fmr->mem.arbel.mtts);
} else
- mr->mem.tavor.mtts = (u64*)((u8*)dev->mr_table.tavor_fmr.mtt_base + mtt_seg);
+ fmr->mem.tavor.mtts = (u64*)((u8*)dev->mr_table.tavor_fmr.mtt_base + mtt_seg);
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
MTHCA_MPT_FLAG_REGION |
access);
- mpt_entry->page_size = cl_hton32(mr->attr.page_shift - 12);
+ mpt_entry->page_size = cl_hton32(fmr->attr.page_shift - 12);
mpt_entry->key = cl_hton32(key);
mpt_entry->pd = cl_hton32(pd);
RtlZeroMemory(&mpt_entry->start,
mpt_entry->mtt_seg = CPU_2_BE64(dev->mr_table.mtt_base + mtt_seg);
{
- HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Dumping MPT entry %08x:\n", mr->ibmr.lkey));
+ HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Dumping MPT entry %08x:\n", fmr->ibfmr.lkey));
for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; i=i+4) {
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("[%02x] %08x %08x %08x %08x \n",i,
cl_ntoh32(((__be32 *) mpt_entry)[i]),
mthca_free_mailbox(dev, mailbox);
err_out_free_mtt:
- mthca_free_mtt(dev, mr->mtt);
+ mthca_free_mtt(dev, fmr->mtt);
err_out_table:
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
+ mthca_free(&dev->mr_table.mpt_alloc, fmr->ibfmr.lkey);
return err;
}
if (fmr->maps)
return -EBUSY;
- mthca_free_region(dev, fmr->ibmr.lkey);
+ mthca_free_region(dev, fmr->ibfmr.lkey);
mthca_free_mtt(dev, fmr->mtt);
return 0;
++fmr->maps;
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
+ key = tavor_key_to_hw_index(fmr->ibfmr.lkey);
key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
+ fmr->ibfmr.lkey = fmr->ibfmr.rkey = tavor_hw_index_to_key(key);
writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
++fmr->maps;
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
+ key = arbel_key_to_hw_index(fmr->ibfmr.lkey);
if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
key += SINAI_FMR_KEY_INC;
else
key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
+ fmr->ibfmr.lkey = fmr->ibfmr.rkey = arbel_hw_index_to_key(key);
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
if (!fmr->maps)
return;
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
+ key = tavor_key_to_hw_index(fmr->ibfmr.lkey);
key &= dev->limits.num_mpts - 1;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
+ fmr->ibfmr.lkey = fmr->ibfmr.rkey = tavor_hw_index_to_key(key);
fmr->maps = 0;
if (!fmr->maps)
return;
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
+ key = arbel_key_to_hw_index(fmr->ibfmr.lkey);
key &= dev->limits.num_mpts - 1;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
+ fmr->ibfmr.lkey = fmr->ibfmr.rkey = arbel_hw_index_to_key(key);
fmr->maps = 0;
mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}
+
int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
- struct mthca_ucontext *mucontext = to_mucontext(context);
+ struct mthca_ucontext *mucontext = to_mucontext(context);
if (mthca_is_livefish(to_mdev(context->device)))
goto done;
return ERR_PTR(err);
}
- return &fmr->ibmr;
+ return &fmr->ibfmr;
}
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
};
struct mthca_fmr {
- struct ib_fmr ibmr;
+ struct ib_fmr ibfmr;
struct ib_fmr_attr attr;
struct mthca_mtt *mtt;
int maps;
return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
- return container_of(ibmr, struct mthca_fmr, ibmr);
+ return container_of(ibfmr, struct mthca_fmr, ibfmr);
}
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
\r
#include <iba/ib_types.h>\r
#include <complib/cl_waitobj.h>\r
-\r
+#include <complib/cl_qlist.h>\r
\r
#ifdef __cplusplus\r
extern "C"\r
typedef struct _al_svc_entry* __ptr64 ib_svc_handle_t;\r
typedef struct _al_pool_key* __ptr64 ib_pool_key_t;\r
typedef struct _al_pool* __ptr64 ib_pool_handle_t;\r
-\r
+typedef struct _mlnx_fmr_pool_element* __ptr64 mlnx_fmr_pool_el_t;\r
\r
typedef struct _ib_cm_handle\r
{\r
* mlnx_destroy_fmr, mlnx_fmr_create_t\r
*****/\r
\r
+\r
+/****f* Access Layer/ib_create_fmr_pool\r
+* NAME\r
+* ib_create_fmr_pool\r
+*\r
+* DESCRIPTION\r
+* Creates a pool of FMR elements for use \r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+mlnx_create_fmr_pool(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const mlnx_fmr_pool_create_t *p_fmr_pool_attr,\r
+ OUT mlnx_fmr_pool_handle_t* const ph_pool );\r
+/*\r
+* PARAMETERS\r
+TODO\r
+*\r
+* RETURN VALUES\r
+TODO\r
+*\r
+* NOTES\r
+TODO\r
+*\r
+* SEE ALSO\r
+TODO\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_destroy_fmr_pool\r
+* NAME\r
+* ib_destroy_fmr_pool\r
+*\r
+* DESCRIPTION\r
+*	Destroys an FMR pool and all associated resources.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+mlnx_destroy_fmr_pool(\r
+ IN const mlnx_fmr_pool_handle_t h_pool );\r
+/*\r
+* PARAMETERS\r
+TODO\r
+*\r
+* RETURN VALUES\r
+TODO\r
+*\r
+* NOTES\r
+TODO\r
+*\r
+* SEE ALSO\r
+TODO\r
+*****/\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****f* Access Layer/ib_fmr_pool_map_phys\r
+* NAME\r
+*	ib_fmr_pool_map_phys\r
+*\r
+* DESCRIPTION\r
+*	Maps a list of physical pages to a free FMR element from the pool.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+mlnx_map_phys_fmr_pool(\r
+ IN const mlnx_fmr_pool_handle_t h_pool ,\r
+ IN const uint64_t* const paddr_list,\r
+ IN const int list_len,\r
+ IN OUT uint64_t* const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey,\r
+ OUT mlnx_fmr_pool_el_t *pp_fmr_el);\r
+/*\r
+* PARAMETERS\r
+TODO\r
+*\r
+* RETURN VALUES\r
+TODO\r
+*\r
+* NOTES\r
+TODO\r
+*\r
+* SEE ALSO\r
+TODO\r
+*****/\r
+\r
+\r
+\r
+\r
+\r
+/****f* Access Layer/ib_unmap_fmr_pool\r
+* NAME\r
+*	ib_unmap_fmr_pool\r
+*\r
+* DESCRIPTION\r
+*	Returns a previously mapped FMR element to the pool.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_EXPORT ib_api_status_t AL_API\r
+mlnx_unmap_fmr_pool(\r
+ IN mlnx_fmr_pool_el_t p_fmr_el );\r
+/*\r
+* PARAMETERS\r
+TODO\r
+*\r
+* RETURN VALUES\r
+TODO\r
+*\r
+* NOTES\r
+TODO\r
+*\r
+* SEE ALSO\r
+TODO\r
+*****/\r
+\r
+\r
+/****f* Access Layer/ib_flush_fmr_pool\r
+* NAME\r
+* ib_flush_fmr_pool\r
+*\r
+* DESCRIPTION\r
+*	Flushes the FMR pool, unmapping all in-use FMR elements.\r
+*\r
+* SYNOPSIS\r
+*/\r
+ib_api_status_t\r
+mlnx_flush_fmr_pool(mlnx_fmr_pool_handle_t h_pool);\r
+/*\r
+* PARAMETERS\r
+TODO\r
+*\r
+* RETURN VALUES\r
+TODO\r
+*\r
+* NOTES\r
+TODO\r
+*\r
+* SEE ALSO\r
+TODO\r
+*****/\r
#endif /* CL_KERNEL */\r
\r
/****f* Access Layer/ib_create_mw\r
*/\r
typedef ib_api_status_t\r
(*ci_unmap_mlnx_fmr) (\r
- IN const mlnx_fmr_handle_t h_fmr);\r
+ IN const mlnx_fmr_handle_t *ph_fmr);\r
/*\r
* DESCRIPTION\r
* //TODO\r
/*\r
* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
* Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.\r
- * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.\r
* Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
*\r
* This software is available to you under the OpenIB.org BSD license\r
/*\r
* MAD methods\r
*/\r
-
+\r
/****d* IBA Base: Constants/IB_MAX_METHOD\r
* NAME\r
* IB_MAX_METHOD\r
*/\r
#define IB_MAX_METHODS 128\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_RESP_MASK\r
* NAME\r
* IB_MAD_METHOD_RESP_MASK\r
*/\r
#define IB_MAD_METHOD_RESP_MASK 0x80\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_GET\r
* NAME\r
* IB_MAD_METHOD_GET\r
*/\r
#define IB_MAD_METHOD_GET 0x01\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_SET\r
* NAME\r
* IB_MAD_METHOD_SET\r
*/\r
#define IB_MAD_METHOD_SET 0x02\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_GET_RESP\r
* NAME\r
* IB_MAD_METHOD_GET_RESP\r
*/\r
#define IB_MAD_METHOD_GETTABLE 0x12\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_GETTABLE_RESP\r
* NAME\r
* IB_MAD_METHOD_GETTABLE_RESP\r
*/\r
#define IB_MAD_METHOD_SEND 0x03\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP\r
* NAME\r
* IB_MAD_METHOD_TRAP\r
*/\r
#define IB_MAD_METHOD_TRAP 0x05\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT\r
* NAME\r
* IB_MAD_METHOD_REPORT\r
*/\r
#define IB_MAD_METHOD_REPORT 0x06\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP\r
* NAME\r
* IB_MAD_METHOD_REPORT_RESP\r
*/\r
#define IB_MAD_METHOD_REPORT_RESP 0x86\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS\r
* NAME\r
* IB_MAD_METHOD_TRAP_REPRESS\r
*/\r
#define IB_MAD_METHOD_TRAP_REPRESS 0x07\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_BUSY\r
* NAME\r
* IB_MAD_STATUS_BUSY\r
*/\r
#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_REDIRECT\r
* NAME\r
* IB_MAD_STATUS_REDIRECT\r
*/\r
#define IB_MAD_STATUS_REDIRECT (CL_HTON16(0x0002))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER\r
* NAME\r
* IB_MAD_STATUS_UNSUP_CLASS_VER\r
*/\r
#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD\r
* NAME\r
* IB_MAD_STATUS_UNSUP_METHOD\r
*/\r
#define IB_MAD_STATUS_UNSUP_METHOD (CL_HTON16(0x0008))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR\r
* NAME\r
* IB_MAD_STATUS_UNSUP_METHOD_ATTR\r
*/\r
#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD\r
* NAME\r
* IB_MAD_STATUS_INVALID_FIELD\r
*/\r
#define IB_MAD_ATTR_CLASS_PORT_INFO (CL_NTOH16(0x0001))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_NOTICE\r
* NAME\r
* IB_MAD_ATTR_NOTICE\r
*/\r
#define IB_MAD_ATTR_NOTICE (CL_NTOH16(0x0002))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO\r
* NAME\r
* IB_MAD_ATTR_INFORM_INFO\r
*/\r
#define IB_MAD_ATTR_INFORM_INFO (CL_NTOH16(0x0003))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_DESC\r
* NAME\r
* IB_MAD_ATTR_NODE_DESC\r
* SOURCE\r
*/\r
#define IB_MAD_ATTR_NODE_DESC (CL_NTOH16(0x0010))\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL\r
* NAME\r
* IB_MAD_ATTR_PORT_SMPL_CTRL\r
*/\r
#define IB_MAD_ATTR_PORT_SMPL_CTRL (CL_NTOH16(0x0010))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_INFO\r
* NAME\r
* IB_MAD_ATTR_NODE_INFO\r
*/\r
#define IB_MAD_ATTR_NODE_INFO (CL_NTOH16(0x0011))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT\r
* NAME\r
* IB_MAD_ATTR_PORT_SMPL_RSLT\r
*/\r
#define IB_MAD_ATTR_PORT_SMPL_RSLT (CL_NTOH16(0x0011))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO\r
* NAME\r
* IB_MAD_ATTR_SWITCH_INFO\r
*/\r
#define IB_MAD_ATTR_SWITCH_INFO (CL_NTOH16(0x0012))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS\r
* NAME\r
* IB_MAD_ATTR_PORT_CNTRS\r
*/\r
#define IB_MAD_ATTR_PORT_CNTRS (CL_NTOH16(0x0012))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_GUID_INFO\r
* NAME\r
* IB_MAD_ATTR_GUID_INFO\r
*/\r
#define IB_MAD_ATTR_GUID_INFO (CL_NTOH16(0x0014))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_INFO\r
* NAME\r
* IB_MAD_ATTR_PORT_INFO\r
*/\r
#define IB_MAD_ATTR_PORT_INFO (CL_NTOH16(0x0015))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE\r
* NAME\r
* IB_MAD_ATTR_P_KEY_TABLE\r
*/\r
#define IB_MAD_ATTR_P_KEY_TABLE (CL_NTOH16(0x0016))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE\r
* NAME\r
* IB_MAD_ATTR_SLVL_TABLE\r
*/\r
#define IB_MAD_ATTR_SLVL_TABLE (CL_NTOH16(0x0017))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION\r
* NAME\r
* IB_MAD_ATTR_VL_ARBITRATION\r
*/\r
#define IB_MAD_ATTR_VL_ARBITRATION (CL_NTOH16(0x0018))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL\r
* NAME\r
* IB_MAD_ATTR_LIN_FWD_TBL\r
*/\r
#define IB_MAD_ATTR_LIN_FWD_TBL (CL_NTOH16(0x0019))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL\r
* NAME\r
* IB_MAD_ATTR_RND_FWD_TBL\r
*/\r
#define IB_MAD_ATTR_RND_FWD_TBL (CL_NTOH16(0x001A))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL\r
* NAME\r
* IB_MAD_ATTR_MCAST_FWD_TBL\r
*/\r
#define IB_MAD_ATTR_NODE_RECORD (CL_NTOH16(0x0011))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD\r
* NAME\r
* IB_MAD_ATTR_PORTINFO_RECORD\r
#define IB_MAD_ATTR_PORTINFO_RECORD (CL_NTOH16(0x0012))\r
/**********/\r
\r
-/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO_RECORD
-* NAME
-* IB_MAD_ATTR_SWITCH_INFO_RECORD
-*
-* DESCRIPTION
-* SwitchInfoRecord attribute (15.2.5)
-*
-* SOURCE
-*/
-#define IB_MAD_ATTR_SWITCH_INFO_RECORD (CL_NTOH16(0x0014))
-/**********/
+/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO_RECORD\r
+* NAME\r
+* IB_MAD_ATTR_SWITCH_INFO_RECORD\r
+*\r
+* DESCRIPTION\r
+* SwitchInfoRecord attribute (15.2.5)\r
+*\r
+* SOURCE\r
+*/\r
+#define IB_MAD_ATTR_SWITCH_INFO_RECORD (CL_NTOH16(0x0014))\r
+/**********/\r
\r
/****d* IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD\r
* NAME\r
* IB_MAD_ATTR_SMINFO_RECORD\r
*\r
* DESCRIPTION\r
-* SMInfoRecord attribute (15.2.5)
+* SMInfoRecord attribute (15.2.5)\r
*\r
* SOURCE\r
*/\r
*/\r
#define IB_MAD_ATTR_VENDOR_DIAG (CL_NTOH16(0x0030))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_LED_INFO\r
* NAME\r
* IB_MAD_ATTR_LED_INFO\r
*/\r
#define IB_MAD_ATTR_LED_INFO (CL_NTOH16(0x0031))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_RECORD\r
* NAME\r
* IB_MAD_ATTR_SERVICE_RECORD\r
*/\r
#define IB_MAD_ATTR_SERVICE_RECORD (CL_NTOH16(0x0031))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_LFT_RECORD\r
* NAME\r
* IB_MAD_ATTR_LFT_RECORD\r
*/\r
#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015))\r
/**********/\r
-
-/****d* IBA Base: Constants/IB_MAD_ATTR_MFT_RECORD
-* NAME
-* IB_MAD_ATTR_MFT_RECORD
-*
-* DESCRIPTION
-* MulticastForwardingTableRecord attribute (15.2.5.8)
-*
-* SOURCE
-*/
-#define IB_MAD_ATTR_MFT_RECORD (CL_NTOH16(0x0017))
-/**********/
-
+\r
+/****d* IBA Base: Constants/IB_MAD_ATTR_MFT_RECORD\r
+* NAME\r
+* IB_MAD_ATTR_MFT_RECORD\r
+*\r
+* DESCRIPTION\r
+* MulticastForwardingTableRecord attribute (15.2.5.8)\r
+*\r
+* SOURCE\r
+*/\r
+#define IB_MAD_ATTR_MFT_RECORD (CL_NTOH16(0x0017))\r
+/**********/\r
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD\r
* NAME\r
* IB_MAD_ATTR_PKEYTBL_RECORD\r
*/\r
#define IB_MAD_ATTR_PKEY_TBL_RECORD (CL_NTOH16(0x0033))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PATH_RECORD\r
* NAME\r
* IB_MAD_ATTR_PATH_RECORD\r
*/\r
#define IB_MAD_ATTR_PATH_RECORD (CL_NTOH16(0x0035))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_VLARB_RECORD\r
* NAME\r
* IB_MAD_ATTR_VLARB_RECORD\r
*/\r
#define IB_MAD_ATTR_VLARB_RECORD (CL_NTOH16(0x0036))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_RECORD\r
* NAME\r
* IB_MAD_ATTR_SLVL_RECORD\r
*/\r
#define IB_MAD_ATTR_SLVL_RECORD (CL_NTOH16(0x0013))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_MCMEMBER_RECORD\r
* NAME\r
* IB_MAD_ATTR_MCMEMBER_RECORD\r
*/\r
#define IB_MAD_ATTR_MCMEMBER_RECORD (CL_NTOH16(0x0038))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_TRACE_RECORD\r
* NAME\r
-* IB_MAD_ATTR_TRACE_RECORD
+* IB_MAD_ATTR_TRACE_RECORD\r
*\r
* DESCRIPTION\r
* TraceRecord attribute (15.2.5)\r
*/\r
#define IB_MAD_ATTR_TRACE_RECORD (CL_NTOH16(0x0039))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_MULTIPATH_RECORD\r
* NAME\r
* IB_MAD_ATTR_MULTIPATH_RECORD\r
*/\r
#define IB_MAD_ATTR_IO_UNIT_INFO (CL_NTOH16(0x0010))\r
/**********/\r
-
-/****d* IBA Base: Constants/IB_MAD_ATTR_IO_CONTROLLER_PROFILE
+\r
+/****d* IBA Base: Constants/IB_MAD_ATTR_IO_CONTROLLER_PROFILE\r
* NAME\r
-* IB_MAD_ATTR_IO_CONTROLLER_PROFILE
+* IB_MAD_ATTR_IO_CONTROLLER_PROFILE\r
*\r
* DESCRIPTION\r
* IOControllerProfile attribute (16.3.3)\r
*\r
* SOURCE\r
*/\r
-#define IB_MAD_ATTR_IO_CONTROLLER_PROFILE (CL_NTOH16(0x0011))
+#define IB_MAD_ATTR_IO_CONTROLLER_PROFILE (CL_NTOH16(0x0011))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_ENTRIES\r
* NAME\r
* IB_MAD_ATTR_SERVICE_ENTRIES\r
*/\r
#define IB_MAD_ATTR_SERVICE_ENTRIES (CL_NTOH16(0x0012))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT\r
* NAME\r
* IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT\r
*/\r
#define IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT (CL_NTOH16(0x0020))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_PREPARE_TO_TEST\r
* NAME\r
* IB_MAD_ATTR_PREPARE_TO_TEST\r
*/\r
#define IB_MAD_ATTR_PREPARE_TO_TEST (CL_NTOH16(0x0021))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_ONCE\r
* NAME\r
* IB_MAD_ATTR_TEST_DEVICE_ONCE\r
*/\r
#define IB_MAD_ATTR_TEST_DEVICE_ONCE (CL_NTOH16(0x0022))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_LOOP\r
* NAME\r
* IB_MAD_ATTR_TEST_DEVICE_LOOP\r
*/\r
#define IB_MAD_ATTR_TEST_DEVICE_LOOP (CL_NTOH16(0x0023))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_MAD_ATTR_DIAG_CODE\r
* NAME\r
* IB_MAD_ATTR_DIAG_CODE\r
*/\r
#define IB_NODE_TYPE_CA 0x01\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_NODE_TYPE_SWITCH\r
* NAME\r
* IB_NODE_TYPE_SWITCH\r
*/\r
#define IB_NODE_TYPE_SWITCH 0x02\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_NODE_TYPE_ROUTER\r
* NAME\r
* IB_NODE_TYPE_ROUTER\r
*/\r
#define IB_NOTICE_NODE_TYPE_CA (CL_NTOH32(0x000001))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SWITCH\r
* NAME\r
* IB_NOTICE_NODE_TYPE_SWITCH\r
*/\r
#define IB_NOTICE_NODE_TYPE_SWITCH (CL_NTOH32(0x000002))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_ROUTER\r
* NAME\r
* IB_NOTICE_NODE_TYPE_ROUTER\r
*/\r
#define IB_NOTICE_NODE_TYPE_ROUTER (CL_NTOH32(0x000003))\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SUBN_MGMT\r
* NAME\r
* IB_NOTICE_NODE_TYPE_SUBN_MGMT\r
#define IB_NOTICE_NODE_TYPE_SUBN_MGMT (CL_NTOH32(0x000004))\r
/**********/\r
\r
-/****d* IBA Base: Constants/IB_MTU_LEN_TYPE
+/****d* IBA Base: Constants/IB_MTU_LEN_TYPE\r
* NAME\r
-* IB_MTU_LEN_TYPE
+* IB_MTU_LEN_TYPE\r
*\r
* DESCRIPTION\r
* Encoded path MTU.\r
*\r
* SOURCE\r
*/\r
-#define IB_MTU_LEN_256 1
-#define IB_MTU_LEN_512 2
-#define IB_MTU_LEN_1024 3
-#define IB_MTU_LEN_2048 4
-#define IB_MTU_LEN_4096 5
-
-#define IB_MIN_MTU IB_MTU_LEN_256
-#define IB_MAX_MTU IB_MTU_LEN_4096
-
+#define IB_MTU_LEN_256 1\r
+#define IB_MTU_LEN_512 2\r
+#define IB_MTU_LEN_1024 3\r
+#define IB_MTU_LEN_2048 4\r
+#define IB_MTU_LEN_4096 5\r
+\r
+#define IB_MIN_MTU IB_MTU_LEN_256\r
+#define IB_MAX_MTU IB_MTU_LEN_4096\r
+\r
/**********/\r
\r
/****d* IBA Base: Constants/IB_PATH_SELECTOR_TYPE\r
*/\r
#define IB_SMINFO_STATE_NOTACTIVE 0\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING\r
* NAME\r
* IB_SMINFO_STATE_DISCOVERING\r
*/\r
#define IB_SMINFO_STATE_DISCOVERING 1\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_SMINFO_STATE_STANDBY\r
* NAME\r
* IB_SMINFO_STATE_STANDBY\r
*/\r
#define IB_SMINFO_STATE_STANDBY 2\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_SMINFO_STATE_MASTER\r
* NAME\r
* IB_SMINFO_STATE_MASTER\r
*/\r
#define IB_SMINFO_STATE_MASTER 3\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK\r
* NAME\r
* IB_PATH_REC_SELECTOR_MASK\r
* SOURCE\r
*/\r
#define IB_PATH_REC_SELECTOR_MASK 0xC0\r
-
-/****d* IBA Base: Constants/IB_MULTIPATH_REC_SELECTOR_MASK
-* NAME
-* IB_MULTIPATH_REC_SELECTOR_MASK
-*
-* DESCRIPTION
-* Mask for the selector field for multipath record MTU, rate,
-* and packet lifetime.
-*
-* SOURCE
-*/
-#define IB_MULTIPATH_REC_SELECTOR_MASK 0xC0
+\r
+/****d* IBA Base: Constants/IB_MULTIPATH_REC_SELECTOR_MASK\r
+* NAME\r
+* IB_MULTIPATH_REC_SELECTOR_MASK\r
+*\r
+* DESCRIPTION\r
+* Mask for the selector field for multipath record MTU, rate,\r
+* and packet lifetime.\r
+*\r
+* SOURCE\r
+*/\r
+#define IB_MULTIPATH_REC_SELECTOR_MASK 0xC0\r
/**********/\r
-
+\r
/****d* IBA Base: Constants/IB_PATH_REC_BASE_MASK\r
* NAME\r
* IB_PATH_REC_BASE_MASK\r
* Definitions are from the InfiniBand Architecture Specification v1.2\r
*\r
*********/\r
-
+\r
/****d* IBA Base: Types/ib_net16_t\r
* NAME\r
* ib_net16_t\r
*/\r
typedef uint16_t ib_net16_t;\r
/**********/\r
-
+\r
/****d* IBA Base: Types/ib_net32_t\r
* NAME\r
* ib_net32_t\r
*/\r
typedef uint32_t ib_net32_t;\r
/**********/\r
-
+\r
/****d* IBA Base: Types/ib_net64_t\r
* NAME\r
* ib_net64_t\r
*/\r
typedef uint64_t ib_net64_t;\r
/**********/\r
-
+\r
/****d* IBA Base: Types/ib_gid_prefix_t\r
* NAME\r
* ib_gid_prefix_t\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_pkey_is_full_member\r
* NAME\r
* ib_pkey_is_full_member\r
{\r
if (ib_pkey_get_base(pkey) == 0x0000)\r
return TRUE;\r
-
+\r
return FALSE;\r
}\r
/*\r
*\r
* SEE ALSO\r
*********/\r
-
-/****f* IBA Base: Types/ib_gid_is_multicast
-* NAME
-* ib_gid_is_multicast
-*
-* DESCRIPTION
-* Returns a boolean indicating whether a GID is a multicast GID.
-*
-* SYNOPSIS
+\r
+/****f* IBA Base: Types/ib_gid_is_multicast\r
+* NAME\r
+* ib_gid_is_multicast\r
+*\r
+* DESCRIPTION\r
+* Returns a boolean indicating whether a GID is a multicast GID.\r
+*\r
+* SYNOPSIS\r
*/\r
AL_INLINE boolean_t AL_API\r
ib_gid_is_multicast(\r
return( p_gid->raw[0] == 0xFF );\r
}\r
\r
-/****f* IBA Base: Types/ib_gid_get_scope
-* NAME
-* ib_gid_get_scope
-*
-* DESCRIPTION
-* Returns scope of (assumed) multicast GID.
-*
-* SYNOPSIS
-*/
+/****f* IBA Base: Types/ib_gid_get_scope\r
+* NAME\r
+* ib_gid_get_scope\r
+*\r
+* DESCRIPTION\r
+* Returns scope of (assumed) multicast GID.\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE uint8_t AL_API\r
-ib_mgid_get_scope(
- IN const ib_gid_t* p_gid )
-{
- return( p_gid->raw[1] & 0x0F );
-}
-
-/****f* IBA Base: Types/ib_gid_set_scope
-* NAME
-* ib_gid_set_scope
-*
-* DESCRIPTION
-* Sets scope of (assumed) multicast GID.
-*
-* SYNOPSIS
-*/
+ib_mgid_get_scope(\r
+ IN const ib_gid_t* p_gid )\r
+{\r
+ return( p_gid->raw[1] & 0x0F );\r
+}\r
+\r
+/****f* IBA Base: Types/ib_gid_set_scope\r
+* NAME\r
+* ib_gid_set_scope\r
+*\r
+* DESCRIPTION\r
+* Sets scope of (assumed) multicast GID.\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE void AL_API\r
-ib_mgid_set_scope(
- IN ib_gid_t* const p_gid,
- IN const uint8_t scope )
-{
- p_gid->raw[1] &= 0xF0;
- p_gid->raw[1] |= scope & 0x0F;
-}
-
+ib_mgid_set_scope(\r
+ IN ib_gid_t* const p_gid,\r
+ IN const uint8_t scope )\r
+{\r
+ p_gid->raw[1] &= 0xF0;\r
+ p_gid->raw[1] |= scope & 0x0F;\r
+}\r
+\r
/****f* IBA Base: Types/ib_gid_set_default\r
* NAME\r
* ib_gid_set_default\r
* SEE ALSO\r
* ib_gid_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_gid_get_subnet_prefix\r
* NAME\r
* ib_gid_get_subnet_prefix\r
* SEE ALSO\r
* ib_gid_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_gid_is_link_local\r
* NAME\r
* ib_gid_is_link_local\r
* SEE ALSO\r
* ib_gid_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_gid_is_site_local\r
* NAME\r
* ib_gid_is_site_local\r
* [in] Pointer to the GID object.\r
*\r
* RETURN VALUES\r
-* Returns TRUE if the unicast GID scoping indicates site local,
+* Returns TRUE if the unicast GID scoping indicates site local,\r
* FALSE otherwise.\r
*\r
* NOTES\r
* SEE ALSO\r
* ib_gid_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_gid_get_guid\r
* NAME\r
* ib_gid_get_guid\r
* Another global routing parameter.\r
*\r
* num_path\r
-* Reversible path - 1 bit to say if path is reversible.
-* num_path [6:0] In queries, maximum number of paths to return.
+* Reversible path - 1 bit to say if path is reversible.\r
+* num_path [6:0] In queries, maximum number of paths to return.\r
* In responses, undefined.\r
*\r
* pkey\r
* SEE ALSO\r
*********/\r
\r
-/* Path Record Component Masks */
-#define IB_PR_COMPMASK_DGID (CL_HTON64(((uint64_t)1)<<2))
-#define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3))
-#define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4))
-#define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5))
-#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6))
-#define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7))
-#define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8))
-#define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9))
-#define IB_PR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<10))
-#define IB_PR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<11))
+/* Path Record Component Masks */\r
+#define IB_PR_COMPMASK_DGID (CL_HTON64(((uint64_t)1)<<2))\r
+#define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3))\r
+#define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4))\r
+#define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5))\r
+#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6))\r
+#define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7))\r
+#define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8))\r
+#define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9))\r
+#define IB_PR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<10))\r
+#define IB_PR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<11))\r
#define IB_PR_COMPMASK_NUM_PATH (CL_HTON64(((uint64_t)1)<<12))\r
-#define IB_PR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<13))
-#define IB_PR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<14))
-#define IB_PR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<15))
-#define IB_PR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<16))
-#define IB_PR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<17))
-#define IB_PR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<18))
-#define IB_PR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<19))
-#define IB_PR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<20))
-#define IB_PR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<21))
+#define IB_PR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<13))\r
+#define IB_PR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<14))\r
+#define IB_PR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<15))\r
+#define IB_PR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<16))\r
+#define IB_PR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<17))\r
+#define IB_PR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<18))\r
+#define IB_PR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<19))\r
+#define IB_PR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<20))\r
+#define IB_PR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<21))\r
\r
/* Link Record Component Masks */\r
#define IB_LR_COMPMASK_FROM_LID (CL_HTON64(((uint64_t)1)<<0))\r
#define IB_LR_COMPMASK_TO_PORT (CL_HTON64(((uint64_t)1)<<2))\r
#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3))\r
\r
-/* VL Arbitration Record Masks */
+/* VL Arbitration Record Masks */\r
#define IB_VLA_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
#define IB_VLA_COMPMASK_OUT_PORT (CL_HTON64(((uint64_t)1)<<1))\r
#define IB_VLA_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<2))\r
#define IB_PKEY_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1))\r
#define IB_PKEY_COMPMASK_PORT (CL_HTON64(((uint64_t)1)<<2))\r
\r
-/* Switch Info Record Masks */
-#define IB_SWIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
-#define IB_SWIR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1))
-
-/* LFT Record Masks */
+/* Switch Info Record Masks */\r
+#define IB_SWIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
+#define IB_SWIR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1))\r
+\r
+/* LFT Record Masks */\r
#define IB_LFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
#define IB_LFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1))\r
\r
-/* MFT Record Masks */
-#define IB_MFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
-#define IB_MFTR_COMPMASK_POSITION (CL_HTON64(((uint64_t)1)<<1))
-#define IB_MFTR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<2))
-#define IB_MFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<3))
-#define IB_MFTR_COMPMASK_RESERVED2 (CL_HTON64(((uint64_t)1)<<4))
-
+/* MFT Record Masks */\r
+#define IB_MFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
+#define IB_MFTR_COMPMASK_POSITION (CL_HTON64(((uint64_t)1)<<1))\r
+#define IB_MFTR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<2))\r
+#define IB_MFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<3))\r
+#define IB_MFTR_COMPMASK_RESERVED2 (CL_HTON64(((uint64_t)1)<<4))\r
+\r
/* NodeInfo Record Masks */\r
#define IB_NR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
#define IB_NR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1))\r
#define IB_SR_COMPMASK_SDATA32_3 (CL_HTON64(((uint64_t)1)<<34))\r
#define IB_SR_COMPMASK_SDATA64_0 (CL_HTON64(((uint64_t)1)<<35))\r
#define IB_SR_COMPMASK_SDATA64_1 (CL_HTON64(((uint64_t)1)<<36))\r
-
+\r
/* Port Info Record Component Masks */\r
-#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))
-#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1))
-#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2))
-#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3))
-#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4))
-#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5))
-#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6))
-#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7))
-#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8))
-#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9))
-#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10))
-#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11))
-#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12))
-#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13))
-#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14))
-#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15))
-#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16))
-#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17))
-#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18))
-#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19))
-#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20))
-#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21))
-#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22))
-#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23))
-#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24))
-#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25))
-#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26))
-#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27))
-#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28))
-#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29))
-#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30))
-#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31))
-#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32))
-#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33))
-#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34))
-#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35))
-#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36))
-#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37))
-#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38))
-#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39))
-#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40))
-#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41))
-#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42))
-#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43))
-#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44))
-#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45))
-#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46))
-#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47))
-#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48))
-
+#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0))\r
+#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1))\r
+#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2))\r
+#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3))\r
+#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4))\r
+#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5))\r
+#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6))\r
+#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7))\r
+#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8))\r
+#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9))\r
+#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10))\r
+#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11))\r
+#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12))\r
+#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13))\r
+#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14))\r
+#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15))\r
+#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16))\r
+#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17))\r
+#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18))\r
+#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19))\r
+#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20))\r
+#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21))\r
+#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22))\r
+#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23))\r
+#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24))\r
+#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25))\r
+#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26))\r
+#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27))\r
+#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28))\r
+#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29))\r
+#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30))\r
+#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31))\r
+#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32))\r
+#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33))\r
+#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34))\r
+#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35))\r
+#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36))\r
+#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37))\r
+#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38))\r
+#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39))\r
+#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40))\r
+#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41))\r
+#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42))\r
+#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43))\r
+#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44))\r
+#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45))\r
+#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46))\r
+#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47))\r
+#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48))\r
+\r
/* Multicast Member Record Component Masks */\r
#define IB_MCR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<0))\r
#define IB_MCR_COMPMASK_MGID (CL_HTON64(((uint64_t)1)<<0))\r
* [in] LID of source port.\r
*\r
* num_path\r
-* [in] Reversible path - 1 bit to say if path is reversible.
-* num_path [6:0] In queries, maximum number of paths to return.
+* [in] Reversible path - 1 bit to say if path is reversible.\r
+* num_path [6:0] In queries, maximum number of paths to return.\r
* In responses, undefined.\r
*\r
* pkey\r
*/\r
#define IB_CLASS_CAP_TRAP 0x0001\r
/*********/\r
-
+\r
/****s* IBA Base: Constants/IB_CLASS_CAP_GETSET\r
* NAME\r
* IB_CLASS_CAP_GETSET\r
*/\r
#define IB_CLASS_CAP_GETSET 0x0002\r
/*********/\r
-
+\r
/****s* IBA Base: Constants/IB_CLASS_RESP_TIME_MASK\r
* NAME\r
* IB_CLASS_RESP_TIME_MASK\r
* IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP\r
*\r
*********/\r
-
+\r
/****s* IBA Base: Types/ib_sm_info_t\r
* NAME\r
* ib_sm_info_t\r
* SEE ALSO\r
* ib_smp_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_smp_is_d\r
* NAME\r
* ib_smp_is_d\r
* SEE ALSO\r
* ib_mad_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_smp_get_payload_ptr\r
* NAME\r
* ib_smp_get_payload_ptr\r
} PACK_SUFFIX ib_node_info_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
-
+\r
/****s* IBA Base: Types/ib_sa_mad_t\r
* NAME\r
* ib_sa_mad_t\r
* SEE ALSO\r
* ib_node_info_t\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_node_info_get_vendor_id\r
* NAME\r
* ib_node_info_get_vendor_id\r
#define IB_PORT_CAP_HAS_NV_PKEY (CL_NTOH32(0x00000100))\r
#define IB_PORT_CAP_HAS_LED_INFO (CL_NTOH32(0x00000200))\r
#define IB_PORT_CAP_SM_DISAB (CL_NTOH32(0x00000400))\r
-#define IB_PORT_CAP_HAS_SYS_IMG_GUID (CL_NTOH32(0x00000800))
+#define IB_PORT_CAP_HAS_SYS_IMG_GUID (CL_NTOH32(0x00000800))\r
#define IB_PORT_CAP_HAS_PKEY_SW_EXT_PORT_TRAP (CL_NTOH32(0x00001000))\r
#define IB_PORT_CAP_RESV13 (CL_NTOH32(0x00002000))\r
#define IB_PORT_CAP_RESV14 (CL_NTOH32(0x00004000))\r
#define IB_PORT_CAP_RESV15 (CL_NTOH32(0x00008000))\r
#define IB_PORT_CAP_HAS_COM_MGT (CL_NTOH32(0x00010000))\r
#define IB_PORT_CAP_HAS_SNMP (CL_NTOH32(0x00020000))\r
-#define IB_PORT_CAP_REINIT (CL_NTOH32(0x00040000))
+#define IB_PORT_CAP_REINIT (CL_NTOH32(0x00040000))\r
#define IB_PORT_CAP_HAS_DEV_MGT (CL_NTOH32(0x00080000))\r
#define IB_PORT_CAP_HAS_VEND_CLS (CL_NTOH32(0x00100000))\r
#define IB_PORT_CAP_HAS_DR_NTC (CL_NTOH32(0x00200000))\r
#define IB_PORT_CAP_HAS_CAP_NTC (CL_NTOH32(0x00400000))\r
#define IB_PORT_CAP_HAS_BM (CL_NTOH32(0x00800000))\r
-#define IB_PORT_CAP_HAS_LINK_RT_LATENCY (CL_NTOH32(0x01000000))
-#define IB_PORT_CAP_HAS_CLIENT_REREG (CL_NTOH32(0x02000000))
+#define IB_PORT_CAP_HAS_LINK_RT_LATENCY (CL_NTOH32(0x01000000))\r
+#define IB_PORT_CAP_HAS_CLIENT_REREG (CL_NTOH32(0x02000000))\r
#define IB_PORT_CAP_RESV26 (CL_NTOH32(0x04000000))\r
#define IB_PORT_CAP_RESV27 (CL_NTOH32(0x08000000))\r
#define IB_PORT_CAP_RESV28 (CL_NTOH32(0x10000000))\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_set_port_state\r
* NAME\r
* ib_port_info_set_port_state\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_get_vl_cap\r
* NAME\r
* ib_port_info_get_vl_cap\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_set_op_vls\r
* NAME\r
* ib_port_info_set_op_vls\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_set_state_no_change\r
* NAME\r
* ib_port_info_set_state_no_change\r
* SEE ALSO\r
*********/\r
\r
-/****f* IBA Base: Types/ib_port_info_get_port_phys_state
-* NAME
-* ib_port_info_get_port_phys_state
-*
-* DESCRIPTION
-* Returns the encoded value for the port physical state.
-*
-* SYNOPSIS
+/****f* IBA Base: Types/ib_port_info_get_port_phys_state\r
+* NAME\r
+* ib_port_info_get_port_phys_state\r
+*\r
+* DESCRIPTION\r
+* Returns the encoded value for the port physical state.\r
+*\r
+* SYNOPSIS\r
*/\r
AL_INLINE uint8_t AL_API\r
-ib_port_info_get_port_phys_state(
- IN const ib_port_info_t* const p_pi )
-{
- return( (uint8_t)((p_pi->state_info2 &
- IB_PORT_PHYS_STATE_MASK) >>
- IB_PORT_PHYS_STATE_SHIFT) );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Returns the encoded value for the port physical state.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_set_port_phys_state
-* NAME
-* ib_port_info_set_port_phys_state
-*
-* DESCRIPTION
-* Given an integer of the port physical state,
-* Set the appropriate bits in state_info2
-*
-* SYNOPSIS
-*/
+ib_port_info_get_port_phys_state(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)((p_pi->state_info2 &\r
+ IB_PORT_PHYS_STATE_MASK) >>\r
+ IB_PORT_PHYS_STATE_SHIFT) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns the encoded value for the port physical state.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_port_phys_state\r
+* NAME\r
+* ib_port_info_set_port_phys_state\r
+*\r
+* DESCRIPTION\r
+* Given an integer of the port physical state,\r
+* Set the appropriate bits in state_info2\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE void AL_API\r
-ib_port_info_set_port_phys_state(
- IN uint8_t const phys_state,
- IN ib_port_info_t* p_pi )
-{
- p_pi->state_info2 =
- ( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) |
- ( IB_PORT_PHYS_STATE_MASK &
- (phys_state << IB_PORT_PHYS_STATE_SHIFT) );
-}
-/*
-* PARAMETERS
-* phys_state
-* [in] port physical state.
-*
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* This function does not return a value.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_link_down_def_state
-* NAME
-* ib_port_info_get_link_down_def_state
-*
-* DESCRIPTION
-* Returns the link down default state.
-*
-* SYNOPSIS
-*/
+ib_port_info_set_port_phys_state(\r
+ IN uint8_t const phys_state,\r
+ IN ib_port_info_t* p_pi )\r
+{\r
+ p_pi->state_info2 =\r
+ ( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) |\r
+ ( IB_PORT_PHYS_STATE_MASK &\r
+ (phys_state << IB_PORT_PHYS_STATE_SHIFT) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* phys_state\r
+* [in] port physical state.\r
+*\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* This function does not return a value.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_link_down_def_state\r
+* NAME\r
+* ib_port_info_get_link_down_def_state\r
+*\r
+* DESCRIPTION\r
+* Returns the link down default state.\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE uint8_t AL_API\r
-ib_port_info_get_link_down_def_state(
- IN const ib_port_info_t* const p_pi )
-{
- return( (uint8_t)(p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK) );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* link down default state of the port.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_set_link_down_def_state
-* NAME
-* ib_port_info_set_link_down_def_state
-*
-* DESCRIPTION
-* Sets the link down default state of the port.
-*
-* SYNOPSIS
-*/
+ib_port_info_get_link_down_def_state(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)(p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* link down default state of the port.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_link_down_def_state\r
+* NAME\r
+* ib_port_info_set_link_down_def_state\r
+*\r
+* DESCRIPTION\r
+* Sets the link down default state of the port.\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE void AL_API\r
-ib_port_info_set_link_down_def_state(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t link_dwn_state )
-{
- p_pi->state_info2 = (uint8_t)((p_pi->state_info2 & 0xF0) | link_dwn_state );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* link_dwn_state
-* [in] Link down default state of the port.
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
+ib_port_info_set_link_down_def_state(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t link_dwn_state )\r
+{\r
+ p_pi->state_info2 = (uint8_t)((p_pi->state_info2 & 0xF0) | link_dwn_state );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* link_dwn_state\r
+* [in] Link down default state of the port.\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
/****f* IBA Base: Types/ib_port_info_get_link_speed_active\r
* NAME\r
* ib_port_info_get_link_speed_active\r
ib_port_info_get_link_speed_active(\r
IN const ib_port_info_t* const p_pi )\r
{\r
- return( (uint8_t)((p_pi->link_speed &
- IB_PORT_LINK_SPEED_ACTIVE_MASK) >>
+ return( (uint8_t)((p_pi->link_speed &\r
+ IB_PORT_LINK_SPEED_ACTIVE_MASK) >>\r
IB_PORT_LINK_SPEED_SHIFT) );\r
}\r
/*\r
#define IB_LINK_SPEED_ACTIVE_5 2\r
#define IB_LINK_SPEED_ACTIVE_10 4\r
\r
-/* following v1 ver1.2 p901 */
+/* following v1 ver1.2 p901 */\r
#define IB_PATH_RECORD_RATE_2_5_GBS 2\r
#define IB_PATH_RECORD_RATE_10_GBS 3\r
#define IB_PATH_RECORD_RATE_30_GBS 4\r
#define IB_PATH_RECORD_RATE_80_GBS 9\r
#define IB_PATH_RECORD_RATE_120_GBS 10\r
\r
-#define IB_MIN_RATE IB_PATH_RECORD_RATE_2_5_GBS
-#define IB_MAX_RATE IB_PATH_RECORD_RATE_120_GBS
+#define IB_MIN_RATE IB_PATH_RECORD_RATE_2_5_GBS\r
+#define IB_MAX_RATE IB_PATH_RECORD_RATE_120_GBS\r
\r
/****f* IBA Base: Types/ib_port_info_compute_rate\r
* NAME\r
ib_port_info_compute_rate(\r
IN const ib_port_info_t* const p_pi )\r
{\r
- uint8_t rate = 0;
-
- switch (ib_port_info_get_link_speed_active(p_pi))
+ uint8_t rate = 0;\r
+\r
+ switch (ib_port_info_get_link_speed_active(p_pi))\r
{\r
- case IB_LINK_SPEED_ACTIVE_2_5:
- switch (p_pi->link_width_active)
- {
- case IB_LINK_WIDTH_ACTIVE_1X:
- rate = IB_PATH_RECORD_RATE_2_5_GBS;
- break;
-
- case IB_LINK_WIDTH_ACTIVE_4X:
- rate = IB_PATH_RECORD_RATE_10_GBS;
- break;
-\r
- case IB_LINK_WIDTH_ACTIVE_12X:
- rate = IB_PATH_RECORD_RATE_30_GBS;
- break;
-\r
- default:
- rate = IB_PATH_RECORD_RATE_2_5_GBS;
- break;
- }
- break;
- case IB_LINK_SPEED_ACTIVE_5:
- switch (p_pi->link_width_active)
- {
- case IB_LINK_WIDTH_ACTIVE_1X:
- rate = IB_PATH_RECORD_RATE_5_GBS;
- break;
-\r
- case IB_LINK_WIDTH_ACTIVE_4X:
- rate = IB_PATH_RECORD_RATE_20_GBS;
- break;
-\r
- case IB_LINK_WIDTH_ACTIVE_12X:
- rate = IB_PATH_RECORD_RATE_60_GBS;
- break;
-\r
- default:
- rate = IB_PATH_RECORD_RATE_5_GBS;
- break;
- }
- break;
- case IB_LINK_SPEED_ACTIVE_10:
- switch (p_pi->link_width_active)
- {
- case IB_LINK_WIDTH_ACTIVE_1X:
- rate = IB_PATH_RECORD_RATE_10_GBS;
- break;
-\r
- case IB_LINK_WIDTH_ACTIVE_4X:
- rate = IB_PATH_RECORD_RATE_40_GBS;
- break;
-\r
- case IB_LINK_WIDTH_ACTIVE_12X:
- rate =IB_PATH_RECORD_RATE_120_GBS;
- break;
+ case IB_LINK_SPEED_ACTIVE_2_5:\r
+ switch (p_pi->link_width_active)\r
+ {\r
+ case IB_LINK_WIDTH_ACTIVE_1X:\r
+ rate = IB_PATH_RECORD_RATE_2_5_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_4X:\r
+ rate = IB_PATH_RECORD_RATE_10_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_12X:\r
+ rate = IB_PATH_RECORD_RATE_30_GBS;\r
+ break;\r
+\r
+ default:\r
+ rate = IB_PATH_RECORD_RATE_2_5_GBS;\r
+ break;\r
+ }\r
+ break;\r
+ case IB_LINK_SPEED_ACTIVE_5:\r
+ switch (p_pi->link_width_active)\r
+ {\r
+ case IB_LINK_WIDTH_ACTIVE_1X:\r
+ rate = IB_PATH_RECORD_RATE_5_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_4X:\r
+ rate = IB_PATH_RECORD_RATE_20_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_12X:\r
+ rate = IB_PATH_RECORD_RATE_60_GBS;\r
+ break;\r
+\r
+ default:\r
+ rate = IB_PATH_RECORD_RATE_5_GBS;\r
+ break;\r
+ }\r
+ break;\r
+ case IB_LINK_SPEED_ACTIVE_10:\r
+ switch (p_pi->link_width_active)\r
+ {\r
+ case IB_LINK_WIDTH_ACTIVE_1X:\r
+ rate = IB_PATH_RECORD_RATE_10_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_4X:\r
+ rate = IB_PATH_RECORD_RATE_40_GBS;\r
+ break;\r
+\r
+ case IB_LINK_WIDTH_ACTIVE_12X:\r
+ rate =IB_PATH_RECORD_RATE_120_GBS;\r
+ break;\r
\r
default:\r
- rate = IB_PATH_RECORD_RATE_10_GBS;
- break;
- }
- break;
- default:
- rate = IB_PATH_RECORD_RATE_2_5_GBS;
- break;
+ rate = IB_PATH_RECORD_RATE_10_GBS;\r
+ break;\r
+ }\r
+ break;\r
+ default:\r
+ rate = IB_PATH_RECORD_RATE_2_5_GBS;\r
+ break;\r
}\r
-
- return rate;
+\r
+ return rate;\r
}\r
/*\r
* PARAMETERS\r
\r
switch(local_link_width_supported)\r
{\r
- /* link_width_supported = 1: 1x */
+ /* link_width_supported = 1: 1x */\r
case 1:\r
break;\r
\r
- /* link_width_supported = 3: 1x or 4x */
+ /* link_width_supported = 3: 1x or 4x */\r
case 3:\r
switch(path_rec_rate & 0x3F)\r
{\r
}\r
break;\r
\r
- /* link_width_supported = 11: 1x or 4x or 12x */
+ /* link_width_supported = 11: 1x or 4x or 12x */\r
case 11:\r
switch(path_rec_rate & 0x3F)\r
{\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_get_neighbor_mtu\r
* NAME\r
* ib_port_info_get_neighbor_mtu\r
*\r
* SEE ALSO\r
*********/\r
-
+\r
/****f* IBA Base: Types/ib_port_info_set_neighbor_mtu\r
* NAME\r
* ib_port_info_set_neighbor_mtu\r
IN const uint8_t timeout )\r
{\r
CL_ASSERT( timeout <= 0x1F );\r
- p_pi->subnet_timeout =
- (uint8_t)(
- (p_pi->subnet_timeout & 0x80) | (timeout & 0x1F));
+ p_pi->subnet_timeout =\r
+ (uint8_t)(\r
+ (p_pi->subnet_timeout & 0x80) | (timeout & 0x1F));\r
}\r
/*\r
* PARAMETERS\r
*\r
* SEE ALSO\r
*********/\r
-
-/****f* IBA Base: Types/ib_port_info_set_client_rereg
-* NAME
-* ib_port_info_set_client_rereg
-*
-* DESCRIPTION
-* Sets the encoded client reregistration bit value in the PortInfo attribute.
-*
-* SYNOPSIS
-*/
-AL_INLINE void AL_API\r
-ib_port_info_set_client_rereg(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t client_rereg )
-{
- CL_ASSERT( client_rereg <= 0x1 );
- p_pi->subnet_timeout =
- (uint8_t)(
- (p_pi->subnet_timeout & 0x1F) | ((client_rereg << 7) & 0x80));
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* client_rereg
-* [in] Client reregistration value to set (either 1 or 0).
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_timeout
-* NAME
-* ib_port_info_get_timeout
-*
-* DESCRIPTION
-* Gets the encoded subnet timeout value in the PortInfo attribute.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_timeout(
- IN ib_port_info_t const* p_pi )
-{
- return(p_pi->subnet_timeout & 0x1F );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* The encoded timeout value
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_client_rereg
-* NAME
-* ib_port_info_get_client_rereg
-*
-* DESCRIPTION
-* Gets the encoded client reregistration bit value in the PortInfo attribute.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_client_rereg(
- IN ib_port_info_t const* p_pi )
-{
- return ( (p_pi->subnet_timeout & 0x80 ) >> 7);
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Client reregistration value (either 1 or 0).
-*
-* NOTES
-*
-* SEE ALSO
-*********/\r
-\r
-/****f* IBA Base: Types/ib_port_info_set_hoq_lifetime
-* NAME
-* ib_port_info_set_hoq_lifetime
-*
-* DESCRIPTION
-* Sets the Head of Queue Lifetime for which a packet can live in the head
-* of VL queue
-*
-* SYNOPSIS
-*/
-AL_INLINE void AL_API\r
-ib_port_info_set_hoq_lifetime(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t hoq_life )
-{
- p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) |
- (p_pi->vl_stall_life & 0xe0));
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* hoq_life
-* [in] Encoded lifetime value to set
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_hoq_lifetime
-* NAME
-* ib_port_info_get_hoq_lifetime
-*
-* DESCRIPTION
-* Gets the Head of Queue Lifetime for which a packet can live in the head
-* of VL queue
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_hoq_lifetime(
- IN const ib_port_info_t* const p_pi )
-{
- return( (uint8_t)(p_pi->vl_stall_life & 0x1f) );
-}
-
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Encoded lifetime value
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_set_vl_stall_count
-* NAME
-* ib_port_info_set_vl_stall_count
-*
-* DESCRIPTION
-* Sets the VL Stall Count which define the number of contiguous
-* HLL (hoq) drops that will put the VL into stalled mode.
-*
-* SYNOPSIS
-*/
-AL_INLINE void AL_API\r
-ib_port_info_set_vl_stall_count(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t vl_stall_count )
-{
- p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) |
- ((vl_stall_count << 5) & 0xe0));
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* vl_stall_count
-* [in] value to set
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_vl_stall_count
-* NAME
-* ib_port_info_get_vl_stall_count
-*
-* DESCRIPTION
-* Gets the VL Stall Count which define the number of contiguous
-* HLL (hoq) drops that will put the VL into stalled mode
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_vl_stall_count(
- IN const ib_port_info_t* const p_pi )
-{
- return( (uint8_t)(p_pi->vl_stall_life & 0xe0) >> 5);
-}
-
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* vl stall count
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_lmc\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_client_rereg\r
* NAME\r
-* ib_port_info_get_lmc\r
+* ib_port_info_set_client_rereg\r
*\r
* DESCRIPTION\r
-* Returns the LMC value assigned to this port.\r
+* Sets the encoded client reregistration bit value in the PortInfo attribute.\r
*\r
* SYNOPSIS\r
*/\r
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_lmc(\r
- IN const ib_port_info_t* const p_pi )\r
-{\r
- return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) );\r
+AL_INLINE void AL_API\r
+ib_port_info_set_client_rereg(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t client_rereg )\r
+{\r
+ CL_ASSERT( client_rereg <= 0x1 );\r
+ p_pi->subnet_timeout =\r
+ (uint8_t)(\r
+ (p_pi->subnet_timeout & 0x1F) | ((client_rereg << 7) & 0x80));\r
}\r
/*\r
* PARAMETERS\r
* p_pi\r
* [in] Pointer to a PortInfo attribute.\r
*\r
+* client_rereg\r
+* [in] Client reregistration value to set (either 1 or 0).\r
+*\r
* RETURN VALUES\r
-* Returns the LMC value assigned to this port.\r
+* None.\r
*\r
* NOTES\r
*\r
* SEE ALSO\r
*********/\r
-
-/****f* IBA Base: Types/ib_port_info_set_lmc\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_timeout\r
* NAME\r
-* ib_port_info_set_lmc\r
+* ib_port_info_get_timeout\r
*\r
* DESCRIPTION\r
-* Sets the LMC value in the PortInfo attribute.\r
+* Gets the encoded subnet timeout value in the PortInfo attribute.\r
*\r
* SYNOPSIS\r
*/\r
-AL_INLINE void AL_API\r
-ib_port_info_set_lmc(\r
- IN ib_port_info_t* const p_pi,\r
- IN const uint8_t lmc )\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_timeout(\r
+ IN ib_port_info_t const* p_pi )\r
{\r
- CL_ASSERT( lmc <= IB_PORT_LMC_MAX );
- p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc);\r
+ return(p_pi->subnet_timeout & 0x1F );\r
}\r
/*\r
* PARAMETERS\r
* p_pi\r
* [in] Pointer to a PortInfo attribute.\r
*\r
-* lmc\r
-* [in] LMC value to set, must be less than 7.\r
-*\r
* RETURN VALUES\r
-* None.\r
+* The encoded timeout value\r
*\r
* NOTES\r
*\r
* SEE ALSO\r
*********/\r
-
-/****f* IBA Base: Types/ib_port_info_get_link_speed_enabled
-* NAME
-* ib_port_info_get_link_speed_enabled
-*
-* DESCRIPTION
-* Returns the link speed enabled value assigned to this port.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_link_speed_enabled(
- IN const ib_port_info_t* const p_pi )
-{
- return( (uint8_t)(p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK) );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Port state.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_set_link_speed_enabled
-* NAME
-* ib_port_info_set_link_speed_enabled
-*
-* DESCRIPTION
-* Sets the link speed enabled value in the PortInfo attribute.
-*
-* SYNOPSIS
-*/
+\r
+/****f* IBA Base: Types/ib_port_info_get_client_rereg\r
+* NAME\r
+* ib_port_info_get_client_rereg\r
+*\r
+* DESCRIPTION\r
+* Gets the encoded client reregistration bit value in the PortInfo attribute.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_client_rereg(\r
+ IN ib_port_info_t const* p_pi )\r
+{\r
+ return ( (p_pi->subnet_timeout & 0x80 ) >> 7);\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Client reregistration value (either 1 or 0).\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_hoq_lifetime\r
+* NAME\r
+* ib_port_info_set_hoq_lifetime\r
+*\r
+* DESCRIPTION\r
+* Sets the Head of Queue Lifetime for which a packet can live in the head \r
+* of VL queue\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE void AL_API\r
+ib_port_info_set_hoq_lifetime(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t hoq_life )\r
+{\r
+ p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) |\r
+ (p_pi->vl_stall_life & 0xe0));\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* hoq_life\r
+* [in] Encoded lifetime value to set\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_hoq_lifetime\r
+* NAME\r
+* ib_port_info_get_hoq_lifetime\r
+*\r
+* DESCRIPTION\r
+* Gets the Head of Queue Lifetime for which a packet can live in the head \r
+* of VL queue\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_hoq_lifetime(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)(p_pi->vl_stall_life & 0x1f) );\r
+}\r
+\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Encoded lifetime value\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_vl_stall_count\r
+* NAME\r
+* ib_port_info_set_vl_stall_count\r
+*\r
+* DESCRIPTION\r
+* Sets the VL Stall Count which define the number of contiguous \r
+* HLL (hoq) drops that will put the VL into stalled mode.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE void AL_API\r
+ib_port_info_set_vl_stall_count(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t vl_stall_count )\r
+{\r
+ p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) |\r
+ ((vl_stall_count << 5) & 0xe0));\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* vl_stall_count \r
+* [in] value to set\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_vl_stall_count\r
+* NAME\r
+* ib_port_info_get_vl_stall_count\r
+*\r
+* DESCRIPTION\r
+* Gets the VL Stall Count which define the number of contiguous \r
+* HLL (hoq) drops that will put the VL into stalled mode\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_vl_stall_count(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)(p_pi->vl_stall_life & 0xe0) >> 5);\r
+}\r
+\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* vl stall count\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_lmc\r
+* NAME\r
+* ib_port_info_get_lmc\r
+*\r
+* DESCRIPTION\r
+* Returns the LMC value assigned to this port.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_lmc(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns the LMC value assigned to this port.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_lmc\r
+* NAME\r
+* ib_port_info_set_lmc\r
+*\r
+* DESCRIPTION\r
+* Sets the LMC value in the PortInfo attribute.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE void AL_API\r
+ib_port_info_set_lmc(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t lmc )\r
+{\r
+ CL_ASSERT( lmc <= IB_PORT_LMC_MAX );\r
+ p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc);\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* lmc\r
+* [in] LMC value to set, must be less than 7.\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_link_speed_enabled\r
+* NAME\r
+* ib_port_info_get_link_speed_enabled\r
+*\r
+* DESCRIPTION\r
+* Returns the link speed enabled value assigned to this port.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_link_speed_enabled(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return( (uint8_t)(p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Link speed enabled value assigned to this port.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_link_speed_enabled\r
+* NAME\r
+* ib_port_info_set_link_speed_enabled\r
+*\r
+* DESCRIPTION\r
+* Sets the link speed enabled value in the PortInfo attribute.\r
+*\r
+* SYNOPSIS\r
+*/\r
AL_INLINE void AL_API\r
-ib_port_info_set_link_speed_enabled(
- IN ib_port_info_t* const p_pi,
- IN const uint8_t link_speed_enabled )
-{
- p_pi->link_speed = (uint8_t)((p_pi->link_speed & 0xF0) | link_speed_enabled );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* link_speed_enabled
-* [in] link speed enabled value to set.
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
+ib_port_info_set_link_speed_enabled(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN const uint8_t link_speed_enabled )\r
+{\r
+ p_pi->link_speed = (uint8_t)((p_pi->link_speed & 0xF0) | link_speed_enabled );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* link_speed_enabled\r
+* [in] link speed enabled value to set.\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
/****f* IBA Base: Types/ib_port_info_get_mpb\r
* NAME\r
* ib_port_info_get_mpb\r
* ib_port_info_set_mpb\r
*\r
* DESCRIPTION\r
-* Set the M_Key protect bits of this port.\r
+* Set the M_Key protect bits of this port.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE void AL_API\r
+ib_port_info_set_mpb(\r
+ IN ib_port_info_t* p_pi,\r
+ IN uint8_t mpb )\r
+{\r
+ p_pi->mkey_lmc =\r
+ (~IB_PORT_MPB_MASK & p_pi->mkey_lmc) |\r
+ ( IB_PORT_MPB_MASK & (mpb << IB_PORT_MPB_SHIFT) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* mpb\r
+* [in] M_Key protect bits\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd\r
+* NAME\r
+* ib_port_info_get_local_phy_err_thd\r
+*\r
+* DESCRIPTION\r
+* Returns the Phy Link Threshold\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_local_phy_err_thd(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return (uint8_t)( (p_pi->error_threshold & 0xF0) >> 4);\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns the Phy Link error threshold assigned to this port.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_get_overrun_err_thd\r
+* NAME\r
+* ib_port_info_get_overrun_err_thd\r
+*\r
+* DESCRIPTION\r
+* Returns the Credits Overrun Errors Threshold\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_port_info_get_overrun_err_thd(\r
+ IN const ib_port_info_t* const p_pi )\r
+{\r
+ return (uint8_t)(p_pi->error_threshold & 0x0F);\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_pi\r
+* [in] Pointer to a PortInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns the Credits Overrun errors threshold assigned to this port.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_port_info_set_phy_and_overrun_err_thd\r
+* NAME\r
+* ib_port_info_set_phy_and_overrun_err_thd\r
+*\r
+* DESCRIPTION\r
+* Sets the Phy Link and Credits Overrun Errors Threshold\r
*\r
* SYNOPSIS\r
*/\r
AL_INLINE void AL_API\r
-ib_port_info_set_mpb(\r
- IN ib_port_info_t* p_pi,\r
- IN uint8_t mpb )\r
+ib_port_info_set_phy_and_overrun_err_thd(\r
+ IN ib_port_info_t* const p_pi,\r
+ IN uint8_t phy_threshold,\r
+ IN uint8_t overrun_threshold )\r
{\r
- p_pi->mkey_lmc =\r
- (~IB_PORT_MPB_MASK & p_pi->mkey_lmc) |
- ( IB_PORT_MPB_MASK & (mpb << IB_PORT_MPB_SHIFT) );
+ p_pi->error_threshold = \r
+ (uint8_t)( ((phy_threshold & 0x0F) << 4) | (overrun_threshold & 0x0F) );\r
}\r
/*\r
* PARAMETERS\r
-* mpb\r
-* [in] M_Key protect bits\r
-* p_ni\r
+* p_pi\r
* [in] Pointer to a PortInfo attribute.\r
*\r
+* phy_threshold\r
+* [in] Physical Link Errors Threshold above which Trap 129 is generated \r
+*\r
+* overrun_threshold\r
+* [in] Credits overrun Errors Threshold above which Trap 129 is generated \r
+*\r
* RETURN VALUES\r
+* None.\r
*\r
* NOTES\r
*\r
* SEE ALSO\r
*********/\r
\r
-/****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd
-* NAME
-* ib_port_info_get_local_phy_err_thd
-*
-* DESCRIPTION
-* Returns the Phy Link Threshold
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_local_phy_err_thd(
- IN const ib_port_info_t* const p_pi )
-{
- return (uint8_t)( (p_pi->error_threshold & 0xF0) >> 4);
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Returns the Phy Link error threshold assigned to this port.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_get_overrun_err_thd
-* NAME
-* ib_port_info_get_local_overrun_err_thd
-*
-* DESCRIPTION
-* Returns the Credits Overrun Errors Threshold
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_port_info_get_overrun_err_thd(
- IN const ib_port_info_t* const p_pi )
-{
- return (uint8_t)(p_pi->error_threshold & 0x0F);
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* RETURN VALUES
-* Returns the Credits Overrun errors threshold assigned to this port.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_port_info_set_phy_and_overrun_err_thd
-* NAME
-* ib_port_info_set_phy_and_overrun_err_thd
-*
-* DESCRIPTION
-* Sets the Phy Link and Credits Overrun Errors Threshold
-*
-* SYNOPSIS
-*/
-AL_INLINE void AL_API\r
-ib_port_info_set_phy_and_overrun_err_thd(
- IN ib_port_info_t* const p_pi,
- IN uint8_t phy_threshold,
- IN uint8_t overrun_threshold )
-{
- p_pi->error_threshold =
- (uint8_t)( ((phy_threshold & 0x0F) << 4) | (overrun_threshold & 0x0F) );
-}
-/*
-* PARAMETERS
-* p_pi
-* [in] Pointer to a PortInfo attribute.
-*
-* phy_threshold
-* [in] Physical Link Errors Threshold above which Trap 129 is generated
-*
-* overrun_threshold
-* [in] Credits overrun Errors Threshold above which Trap 129 is generated
-*
-* RETURN VALUES
-* None.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-\r
typedef uint8_t ib_svc_name_t[64];\r
\r
#include <complib/cl_packon.h>\r
} PACK_SUFFIX ib_sminfo_record_t;\r
#include <complib/cl_packoff.h>\r
\r
-/****s* IBA Base: Types/ib_lft_record_t\r
+/****s* IBA Base: Types/ib_lft_record_t\r
+* NAME\r
+* ib_lft_record_t\r
+*\r
+* DESCRIPTION\r
+* IBA defined LinearForwardingTableRecord (15.2.5.6)\r
+*\r
+* SYNOPSIS\r
+*/\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_lft_record\r
+{\r
+ ib_net16_t lid;\r
+ ib_net16_t block_num;\r
+ uint32_t resv0;\r
+ uint8_t lft[64];\r
+} PACK_SUFFIX ib_lft_record_t;\r
+#include <complib/cl_packoff.h>\r
+/************/\r
+\r
+/****s* IBA Base: Types/ib_mft_record_t\r
+* NAME\r
+* ib_mft_record_t\r
+*\r
+* DESCRIPTION\r
+* IBA defined MulticastForwardingTableRecord (15.2.5.8)\r
+*\r
+* SYNOPSIS\r
+*/\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_mft_record\r
+{\r
+ ib_net16_t lid;\r
+ ib_net16_t position_block_num;\r
+ uint32_t resv0;\r
+ ib_net16_t mft[IB_MCAST_BLOCK_SIZE];\r
+} PACK_SUFFIX ib_mft_record_t;\r
+#include <complib/cl_packoff.h>\r
+/************/\r
+\r
+/****s* IBA Base: Types/ib_switch_info_t\r
+* NAME\r
+* ib_switch_info_t\r
+*\r
+* DESCRIPTION\r
+* IBA defined SwitchInfo. (14.2.5.4)\r
+*\r
+* SYNOPSIS\r
+*/\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_switch_info\r
+{\r
+ ib_net16_t lin_cap;\r
+ ib_net16_t rand_cap;\r
+ ib_net16_t mcast_cap;\r
+ ib_net16_t lin_top;\r
+ uint8_t def_port;\r
+ uint8_t def_mcast_pri_port;\r
+ uint8_t def_mcast_not_port;\r
+ uint8_t life_state;\r
+ ib_net16_t lids_per_port;\r
+ ib_net16_t enforce_cap;\r
+ uint8_t flags;\r
+\r
+} PACK_SUFFIX ib_switch_info_t;\r
+#include <complib/cl_packoff.h>\r
+/************/\r
+\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_switch_info_record\r
+{\r
+ ib_net16_t lid;\r
+ uint16_t resv0;\r
+ ib_switch_info_t switch_info;\r
+ uint8_t pad[3];\r
+\r
+} PACK_SUFFIX ib_switch_info_record_t;\r
+#include <complib/cl_packoff.h>\r
+\r
+#define IB_SWITCH_PSC 0x04\r
+\r
+/****f* IBA Base: Types/ib_switch_info_get_state_change\r
+* NAME\r
+* ib_switch_info_get_state_change\r
+*\r
+* DESCRIPTION\r
+* Returns the value of the state change flag.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE boolean_t AL_API\r
+ib_switch_info_get_state_change(\r
+ IN const ib_switch_info_t* const p_si )\r
+{\r
+ return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_si\r
+* [in] Pointer to a SwitchInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns the value of the state change flag.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_switch_info_clear_state_change\r
+* NAME\r
+* ib_switch_info_clear_state_change\r
+*\r
+* DESCRIPTION\r
+* Clears the switch's state change bit.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE void AL_API\r
+ib_switch_info_clear_state_change(\r
+ IN ib_switch_info_t* const p_si )\r
+{\r
+ p_si->life_state = (uint8_t)(p_si->life_state & 0xFB);\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_si\r
+* [in] Pointer to a SwitchInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* None.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_switch_info_is_enhanced_port0\r
+* NAME\r
+* ib_switch_info_is_enhanced_port0\r
+*\r
+* DESCRIPTION\r
+* Returns TRUE if the enhancedPort0 bit is on (meaning the switch\r
+* port zero supports enhanced functions).\r
+* Returns FALSE otherwise.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE boolean_t AL_API\r
+ib_switch_info_is_enhanced_port0(\r
+ IN const ib_switch_info_t* const p_si )\r
+{\r
+ return( (p_si->flags & 0x08) == 0x08 );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_si\r
+* [in] Pointer to a SwitchInfo attribute.\r
+*\r
+* RETURN VALUES\r
+* Returns TRUE if the switch supports enhanced port 0. FALSE otherwise.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****s* IBA Base: Types/ib_guid_info_t\r
+* NAME\r
+* ib_guid_info_t\r
+*\r
+* DESCRIPTION\r
+* IBA defined GuidInfo. (14.2.5.5)\r
+*\r
+* SYNOPSIS\r
+*/\r
+#define GUID_TABLE_MAX_ENTRIES 8\r
+\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_guid_info\r
+{\r
+ ib_net64_t guid[GUID_TABLE_MAX_ENTRIES];\r
+\r
+} PACK_SUFFIX ib_guid_info_t;\r
+#include <complib/cl_packoff.h>\r
+/************/\r
+\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_guidinfo_record\r
+{\r
+ ib_net16_t lid;\r
+ uint8_t block_num;\r
+ uint8_t resv;\r
+ uint32_t reserved;\r
+ ib_guid_info_t guid_info;\r
+} PACK_SUFFIX ib_guidinfo_record_t;\r
+#include <complib/cl_packoff.h>\r
+\r
+#define IB_MULTIPATH_MAX_GIDS 11 /* Support max that can fit into first MAD (for now) */\r
+\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_multipath_rec_t\r
+{\r
+ ib_net32_t hop_flow_raw;\r
+ uint8_t tclass;\r
+ uint8_t num_path;\r
+ ib_net16_t pkey;\r
+ uint8_t resv0;\r
+ uint8_t sl;\r
+ uint8_t mtu;\r
+ uint8_t rate;\r
+ uint8_t pkt_life;\r
+ uint8_t resv1;\r
+ uint8_t independence; /* formerly resv2 */\r
+ uint8_t sgid_count;\r
+ uint8_t dgid_count;\r
+ uint8_t resv3[7];\r
+ ib_gid_t gids[IB_MULTIPATH_MAX_GIDS];\r
+} PACK_SUFFIX ib_multipath_rec_t;\r
+#include <complib/cl_packoff.h>\r
+/*\r
+* FIELDS\r
+* hop_flow_raw\r
+* Global routing parameters: hop count, flow label and raw bit.\r
+*\r
+* tclass\r
+* Another global routing parameter.\r
+*\r
+* num_path\r
+* Reversible path - 1 bit to say if path is reversible.\r
+* num_path [6:0] In queries, maximum number of paths to return.\r
+* In responses, undefined.\r
+*\r
+* pkey\r
+* Partition key (P_Key) to use on this path.\r
+*\r
+* sl\r
+* Service level to use on this path.\r
+*\r
+* mtu\r
+* MTU and MTU selector fields to use on this path\r
+* rate\r
+* Rate and rate selector fields to use on this path.\r
+*\r
+* pkt_life\r
+* Packet lifetime\r
+*\r
+* preference\r
+* Indicates the relative merit of this path versus other path\r
+* records returned from the SA. Lower numbers are better.\r
+*\r
+* SEE ALSO\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_multipath_rec_num_path\r
+* NAME\r
+* ib_multipath_rec_num_path\r
+*\r
+* DESCRIPTION\r
+* Get max number of paths to return.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_num_path(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
+{\r
+ return( p_rec->num_path &0x7F );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* Maximum number of paths to return for each unique SGID_DGID combination.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_multipath_rec_sl\r
+* NAME\r
+* ib_multipath_rec_sl\r
+*\r
+* DESCRIPTION\r
+* Get multipath service level.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_sl(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
+{\r
+ return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* SL.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_multipath_rec_mtu\r
+* NAME\r
+* ib_multipath_rec_mtu\r
+*\r
+* DESCRIPTION\r
+* Get encoded path MTU.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_mtu(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
+{\r
+ return( (uint8_t)(p_rec->mtu & IB_MULTIPATH_REC_BASE_MASK) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* Encoded path MTU.\r
+* 1: 256\r
+* 2: 512\r
+* 3: 1024\r
+* 4: 2048\r
+* 5: 4096\r
+* others: reserved\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
+\r
+/****f* IBA Base: Types/ib_multipath_rec_mtu_sel\r
* NAME\r
-* ib_lft_record_t\r
+* ib_multipath_rec_mtu_sel\r
*\r
* DESCRIPTION\r
-* IBA defined LinearForwardingTableRecord (15.2.5.6)
+* Get encoded multipath MTU selector.\r
*\r
* SYNOPSIS\r
*/\r
-#include <complib/cl_packon.h>\r
-typedef struct _ib_lft_record\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_mtu_sel(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
{\r
- ib_net16_t lid;\r
- ib_net16_t block_num;\r
- uint32_t resv0;\r
- uint8_t lft[64];\r
-} PACK_SUFFIX ib_lft_record_t;\r
-#include <complib/cl_packoff.h>\r
-/************/\r
+ return( (uint8_t)((p_rec->mtu & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* Encoded path MTU selector value (for queries).\r
+* 0: greater than MTU specified\r
+* 1: less than MTU specified\r
+* 2: exactly the MTU specified\r
+* 3: largest MTU available\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
\r
-/****s* IBA Base: Types/ib_mft_record_t
-* NAME
-* ib_mft_record_t
-*
-* DESCRIPTION
-* IBA defined MulticastForwardingTableRecord (15.2.5.8)
-*
-* SYNOPSIS
-*/
-#include <complib/cl_packon.h>
-typedef struct _ib_mft_record
-{
- ib_net16_t lid;
- ib_net16_t position_block_num;
- uint32_t resv0;
- ib_net16_t mft[IB_MCAST_BLOCK_SIZE];
-} PACK_SUFFIX ib_mft_record_t;
-#include <complib/cl_packoff.h>
-/************/
-
-/****s* IBA Base: Types/ib_switch_info_t\r
+/****f* IBA Base: Types/ib_multipath_rec_rate\r
* NAME\r
-* ib_switch_info_t\r
+* ib_multipath_rec_rate\r
*\r
* DESCRIPTION\r
-* IBA defined SwitchInfo. (14.2.5.4)\r
+* Get encoded multipath rate.\r
*\r
* SYNOPSIS\r
*/\r
-#include <complib/cl_packon.h>\r
-typedef struct _ib_switch_info\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_rate(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
{\r
- ib_net16_t lin_cap;\r
- ib_net16_t rand_cap;\r
- ib_net16_t mcast_cap;\r
- ib_net16_t lin_top;\r
- uint8_t def_port;\r
- uint8_t def_mcast_pri_port;\r
- uint8_t def_mcast_not_port;\r
- uint8_t life_state;\r
- ib_net16_t lids_per_port;\r
- ib_net16_t enforce_cap;\r
- uint8_t flags;\r
-\r
-} PACK_SUFFIX ib_switch_info_t;\r
-#include <complib/cl_packoff.h>\r
-/************/\r
+ return( (uint8_t)(p_rec->rate & IB_MULTIPATH_REC_BASE_MASK) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* Encoded multipath rate.\r
+* 2: 2.5 Gb/sec.\r
+* 3: 10 Gb/sec.\r
+* 4: 30 Gb/sec.\r
+* others: reserved\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
\r
-#include <complib/cl_packon.h>\r
-typedef struct _ib_switch_info_record\r
+/****f* IBA Base: Types/ib_multipath_rec_rate_sel\r
+* NAME\r
+* ib_multipath_rec_rate_sel\r
+*\r
+* DESCRIPTION\r
+* Get encoded multipath rate selector.\r
+*\r
+* SYNOPSIS\r
+*/\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_rate_sel(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
{\r
- ib_net16_t lid;\r
- uint16_t resv0;\r
- ib_switch_info_t switch_info;\r
- uint8_t pad[3];\r
-\r
-} PACK_SUFFIX ib_switch_info_record_t;\r
-#include <complib/cl_packoff.h>\r
-\r
-#define IB_SWITCH_PSC 0x04\r
+ return( (uint8_t)((p_rec->rate & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
+*\r
+* RETURN VALUES\r
+* Encoded path rate selector value (for queries).\r
+* 0: greater than rate specified\r
+* 1: less than rate specified\r
+* 2: exactly the rate specified\r
+* 3: largest rate available\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_multipath_rec_t\r
+*********/\r
\r
-/****f* IBA Base: Types/ib_switch_info_get_state_change\r
+/****f* IBA Base: Types/ib_multipath_rec_pkt_life\r
* NAME\r
-* ib_switch_info_get_state_change\r
+* ib_multipath_rec_pkt_life\r
*\r
* DESCRIPTION\r
-* Returns the value of the state change flag.\r
+* Get encoded multipath pkt_life.\r
*\r
* SYNOPSIS\r
*/\r
-AL_INLINE boolean_t AL_API\r
-ib_switch_info_get_state_change(\r
- IN const ib_switch_info_t* const p_si )\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_pkt_life(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
{\r
- return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC );\r
+ return( (uint8_t)(p_rec->pkt_life & IB_MULTIPATH_REC_BASE_MASK) );\r
}\r
/*\r
* PARAMETERS\r
-* p_si\r
-* [in] Pointer to a SwitchInfo attribute.\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
*\r
* RETURN VALUES\r
-* Returns the value of the state change flag.\r
+* Encoded multipath pkt_life = 4.096 µsec * 2 ** PacketLifeTime.\r
*\r
* NOTES\r
*\r
* SEE ALSO\r
+* ib_multipath_rec_t\r
*********/\r
\r
-/****f* IBA Base: Types/ib_switch_info_clear_state_change\r
+/****f* IBA Base: Types/ib_multipath_rec_pkt_life_sel\r
* NAME\r
-* ib_switch_info_clear_state_change\r
+* ib_multipath_rec_pkt_life_sel\r
*\r
* DESCRIPTION\r
-* Clears the switch's state change bit.\r
+* Get encoded multipath pkt_lifetime selector.\r
*\r
* SYNOPSIS\r
*/\r
-AL_INLINE void AL_API\r
-ib_switch_info_clear_state_change(\r
- IN ib_switch_info_t* const p_si )\r
+AL_INLINE uint8_t AL_API\r
+ib_multipath_rec_pkt_life_sel(\r
+ IN const ib_multipath_rec_t* const p_rec )\r
{\r
- p_si->life_state = (uint8_t)(p_si->life_state & 0xFB);\r
+ return( (uint8_t)((p_rec->pkt_life & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6 ));\r
}\r
/*\r
* PARAMETERS\r
-* p_ni\r
-* [in] Pointer to a PortInfo attribute.\r
+* p_rec\r
+* [in] Pointer to the multipath record object.\r
*\r
* RETURN VALUES\r
-* Returns the LMC value assigned to this port.\r
+* Encoded path pkt_lifetime selector value (for queries).\r
+* 0: greater than rate specified\r
+* 1: less than rate specified\r
+* 2: exactly the rate specified\r
+* 3: smallest packet lifetime available\r
*\r
* NOTES\r
*\r
* SEE ALSO\r
+* ib_multipath_rec_t\r
*********/\r
\r
-/****f* IBA Base: Types/ib_switch_info_is_enhanced_port0
-* NAME
-* ib_switch_info_is_enhanced_port0
-*
-* DESCRIPTION
-* Returns TRUE if the enhancedPort0 bit is on (meaning the switch
-* port zero supports enhanced functions).
-* Returns FALSE otherwise.
-*
-* SYNOPSIS
-*/
-AL_INLINE boolean_t AL_API\r
-ib_switch_info_is_enhanced_port0(
- IN const ib_switch_info_t* const p_si )
-{
- return( (p_si->flags & 0x08) == 0x08 );
-}
-/*
-* PARAMETERS
-* p_si
-* [in] Pointer to a SwitchInfo attribute.
-*
-* RETURN VALUES
-* Returns TRUE if the switch supports enhanced port 0. FALSE otherwise.
-*
-* NOTES
-*
-* SEE ALSO
-*********/
-
-/****s* IBA Base: Types/ib_guid_info_t\r
+#define IB_NUM_PKEY_ELEMENTS_IN_BLOCK 32\r
+/****s* IBA Base: Types/ib_pkey_table_t\r
* NAME\r
-* ib_guid_info_t\r
+* ib_pkey_table_t\r
*\r
* DESCRIPTION\r
-* IBA defined GuidInfo. (14.2.5.5)\r
+* IBA defined PKey table. (14.2.5.7)\r
*\r
* SYNOPSIS\r
*/\r
-#define GUID_TABLE_MAX_ENTRIES 8\r
\r
#include <complib/cl_packon.h>\r
-typedef struct _ib_guid_info\r
+typedef struct _ib_pkey_table\r
{\r
- ib_net64_t guid[GUID_TABLE_MAX_ENTRIES];\r
+ ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK];\r
\r
-} PACK_SUFFIX ib_guid_info_t;\r
+} PACK_SUFFIX ib_pkey_table_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
\r
-#include <complib/cl_packon.h>
-typedef struct _ib_guidinfo_record
-{
- ib_net16_t lid;
- uint8_t block_num;
- uint8_t resv;
- uint32_t reserved;
- ib_guid_info_t guid_info;
-} PACK_SUFFIX ib_guidinfo_record_t;
-#include <complib/cl_packoff.h>
-
-#define IB_MULTIPATH_MAX_GIDS 11 /* Support max that can fit into first MAD (for now) */
-
-#include <complib/cl_packon.h>
-typedef struct _ib_multipath_rec_t
-{
- ib_net32_t hop_flow_raw;
- uint8_t tclass;
- uint8_t num_path;
- ib_net16_t pkey;
- uint8_t resv0;
- uint8_t sl;
- uint8_t mtu;
- uint8_t rate;
- uint8_t pkt_life;
- uint8_t resv1;
- uint8_t independence; /* formerly resv2 */
- uint8_t sgid_count;
- uint8_t dgid_count;
- uint8_t resv3[7];
- ib_gid_t gids[IB_MULTIPATH_MAX_GIDS];
-} PACK_SUFFIX ib_multipath_rec_t;
-#include <complib/cl_packoff.h>
-/*
-* FIELDS
-* hop_flow_raw
-* Global routing parameters: hop count, flow label and raw bit.
-*
-* tclass
-* Another global routing parameter.
-*
-* num_path
-* Reversible path - 1 bit to say if path is reversible.
-* num_path [6:0] In queries, maximum number of paths to return.
-* In responses, undefined.
-*
-* pkey
-* Partition key (P_Key) to use on this path.
-*
-* sl
-* Service level to use on this path.
-*
-* mtu
-* MTU and MTU selector fields to use on this path
-* rate
-* Rate and rate selector fields to use on this path.
-*
-* pkt_life
-* Packet lifetime
-*
-* preference
-* Indicates the relative merit of this path versus other path
-* records returned from the SA. Lower numbers are better.
-*
-* SEE ALSO
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_num_path
-* NAME
-* ib_multipath_rec_num_path
-*
-* DESCRIPTION
-* Get max number of paths to return.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_num_path(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( p_rec->num_path &0x7F );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Maximum number of paths to return for each unique SGID_DGID combination.
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_sl
-* NAME
-* ib_multipath_rec_sl
-*
-* DESCRIPTION
-* Get multipath service level.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_sl(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* SL.
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_mtu
-* NAME
-* ib_multipath_rec_mtu
-*
-* DESCRIPTION
-* Get encoded path MTU.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_mtu(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)(p_rec->mtu & IB_MULTIPATH_REC_BASE_MASK) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded path MTU.
-* 1: 256
-* 2: 512
-* 3: 1024
-* 4: 2048
-* 5: 4096
-* others: reserved
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_mtu_sel
-* NAME
-* ib_multipath_rec_mtu_sel
-*
-* DESCRIPTION
-* Get encoded multipath MTU selector.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_mtu_sel(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)((p_rec->mtu & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded path MTU selector value (for queries).
-* 0: greater than MTU specified
-* 1: less than MTU specified
-* 2: exactly the MTU specified
-* 3: largest MTU available
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_rate
-* NAME
-* ib_multipath_rec_rate
-*
-* DESCRIPTION
-* Get encoded multipath rate.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_rate(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)(p_rec->rate & IB_MULTIPATH_REC_BASE_MASK) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded multipath rate.
-* 2: 2.5 Gb/sec.
-* 3: 10 Gb/sec.
-* 4: 30 Gb/sec.
-* others: reserved
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_rate_sel
-* NAME
-* ib_multipath_rec_rate_sel
-*
-* DESCRIPTION
-* Get encoded multipath rate selector.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_rate_sel(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)((p_rec->rate & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded path rate selector value (for queries).
-* 0: greater than rate specified
-* 1: less than rate specified
-* 2: exactly the rate specified
-* 3: largest rate available
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_pkt_life
-* NAME
-* ib_multipath_rec_pkt_life
-*
-* DESCRIPTION
-* Get encoded multipath pkt_life.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_pkt_life(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)(p_rec->pkt_life & IB_MULTIPATH_REC_BASE_MASK) );
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded multipath pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/
-
-/****f* IBA Base: Types/ib_multipath_rec_pkt_life_sel
-* NAME
-* ib_multipath_rec_pkt_life_sel
-*
-* DESCRIPTION
-* Get encoded multipath pkt_lifetime selector.
-*
-* SYNOPSIS
-*/
-AL_INLINE uint8_t AL_API\r
-ib_multipath_rec_pkt_life_sel(
- IN const ib_multipath_rec_t* const p_rec )
-{
- return( (uint8_t)((p_rec->pkt_life & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6 ));
-}
-/*
-* PARAMETERS
-* p_rec
-* [in] Pointer to the multipath record object.
-*
-* RETURN VALUES
-* Encoded path pkt_lifetime selector value (for queries).
-* 0: greater than rate specified
-* 1: less than rate specified
-* 2: exactly the rate specified
-* 3: smallest packet lifetime available
-*
-* NOTES
-*
-* SEE ALSO
-* ib_multipath_rec_t
-*********/\r
-/****s* IBA Base: Types/ib_pkey_table_info_t\r
-* NAME\r
-* ib_pkey_table_info_t\r
+/****s* IBA Base: Types/ib_pkey_table_record_t\r
+* NAME\r
+* ib_pkey_table_record_t\r
*\r
* DESCRIPTION\r
-* IBA defined PKey table. (14.2.5.7)\r
+* IBA defined P_Key Table Record for SA Query. (15.2.5.11)\r
*\r
* SYNOPSIS\r
*/\r
-#define PKEY_TABLE_MAX_ENTRIES 32\r
-\r
#include <complib/cl_packon.h>\r
-typedef struct _ib_pkey_table_info\r
+typedef struct _ib_pkey_table_record\r
{\r
- ib_net16_t pkey[PKEY_TABLE_MAX_ENTRIES];\r
+ ib_net16_t lid; // for CA: lid of port, for switch lid of port 0\r
+ uint16_t block_num;\r
+ uint8_t port_num; // for switch: port number, for CA: reserved\r
+ uint8_t reserved1;\r
+ uint16_t reserved2;\r
+ ib_pkey_table_t pkey_tbl;\r
\r
-} PACK_SUFFIX ib_pkey_table_info_t;\r
+} PACK_SUFFIX ib_pkey_table_record_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
\r
+#define IB_DROP_VL 15\r
#define IB_MAX_NUM_VLS 16\r
/****s* IBA Base: Types/ib_slvl_table_t\r
* NAME\r
#include <complib/cl_packoff.h>\r
/************/\r
\r
-/****f* IBA Base: Types/ib_slvl_table_get_vl\r
+/****s* IBA Base: Types/ib_slvl_table_record_t\r
* NAME\r
-* ib_slvl_table_get_vl\r
+* ib_slvl_table_record_t\r
*\r
* DESCRIPTION\r
-* Retrieves the VL for a given SL from an SL to VL mapping table.\r
+* IBA defined SL to VL Mapping Table Record for SA Query. (15.2.5.4)\r
*\r
* SYNOPSIS\r
*/\r
-AL_INLINE uint8_t AL_API\r
-ib_slvl_table_get_vl(\r
- IN const ib_slvl_table_t* const p_slvl_tbl,\r
- IN const uint8_t sl )\r
+#include <complib/cl_packon.h>\r
+typedef struct _ib_slvl_table_record\r
{\r
- uint8_t vl;\r
-\r
- /* There are two VL's per byte. */\r
- vl = p_slvl_tbl->vl_table[sl/2];\r
- /* If odd, shift down 4 bits. */\r
- if( sl % 2 )\r
- vl >>= 4;\r
+ ib_net16_t lid; // for CA: lid of port, for switch lid of port 0\r
+ uint8_t in_port_num; // reserved for CAs\r
+ uint8_t out_port_num; // reserved for CAs\r
+ uint32_t resv;\r
+ ib_slvl_table_t slvl_tbl;\r
\r
- /* Mask off upper bits and return. */\r
- return vl & 0x0F;\r
-}\r
-/*\r
-* PARAMETERS\r
-* p_slvl_tbl\r
-* [in] Pointer to the SL to VL mapping table from which to return the VL.\r
-*\r
-* sl\r
-* [in] SL in the table for which to return the VL.\r
-*\r
-* RETURN VALUES\r
-* Returns the VL value for the specified SL in the provided table.\r
-*\r
-* SEE ALSO\r
-* ib_slvl_table_t, ib_slvl_table_set_vl\r
-*********/\r
+} PACK_SUFFIX ib_slvl_table_record_t;\r
+#include <complib/cl_packoff.h>\r
+/************/\r
\r
-/****f* IBA Base: Types/ib_slvl_table_set_vl\r
+/****f* IBA Base: Types/ib_slvl_table_set\r
* NAME\r
-* ib_slvl_table_set_vl\r
+* ib_slvl_table_set\r
*\r
* DESCRIPTION\r
-* Sets the VL for a given SL in an SL to VL mapping table.\r
+* Set slvl table entry.\r
*\r
* SYNOPSIS\r
*/\r
AL_INLINE void AL_API\r
-ib_slvl_table_set_vl(\r
- IN OUT ib_slvl_table_t* const p_slvl_tbl,\r
- IN const uint8_t sl,\r
- IN const uint8_t vl )\r
-{\r
- uint8_t entry;\r
-\r
- /* Get the current value for the byte in which the VL is stored. */\r
- entry = p_slvl_tbl->vl_table[sl/2];\r
+ib_slvl_table_set(\r
+ IN ib_slvl_table_t* p_slvl_tbl,\r
+ IN const uint8_t sl_index,\r
+ IN const uint8_t vl )\r
+ {\r
+ uint8_t idx = sl_index/2;\r
+ CL_ASSERT(vl <= 15);\r
+ CL_ASSERT(sl_index <= 15);\r
\r
- /* Clear the appropriate bits and set the new VL value. */\r
- if( sl % 2 )\r
+ if (sl_index%2)\r
{\r
- entry &= 0x0F;\r
- entry |= ((vl & 0x0F) << 4);\r
+ /* this is an odd sl. Need to update the ls bits */\r
+ p_slvl_tbl->vl_table[idx] = ( p_slvl_tbl->vl_table[idx] & 0xF0 ) | vl ;\r
}\r
else\r
{\r
- entry &= 0xF0;\r
- entry |= (vl & 0x0F);\r
+ /* this is an even sl. Need to update the ms bits */\r
+ p_slvl_tbl->vl_table[idx] = ( vl << 4 ) | ( p_slvl_tbl->vl_table[idx] & 0x0F );\r
}\r
- /* Store the updated entry back into the table. */\r
- p_slvl_tbl->vl_table[sl/2] = entry;\r
}\r
/*\r
* PARAMETERS\r
-* slvl_tbl\r
-* [in/out] Pointer to the SL to VL mapping table in which to store the VL.\r
+* p_slvl_tbl\r
+* [in] pointer to ib_slvl_table_t object.\r
*\r
-* sl\r
-* [in] SL in the table for which to store the VL.\r
+* sl_index\r
+* [in] the sl index in the table to be updated.\r
*\r
* vl\r
-* [in] VL to store at the specifed SL.\r
+* [in] the vl value to update for that sl.\r
*\r
* RETURN VALUES\r
-* This function does not return a value.\r
+* None\r
+*\r
+* NOTES\r
*\r
* SEE ALSO\r
-* ib_slvl_table_t, ib_slvl_table_get_vl\r
+* ib_slvl_table_t\r
*********/\r
\r
-/****s* IBA Base: Types/ib_slvl_table_record_t\r
+/****f* IBA Base: Types/ib_slvl_table_get\r
* NAME\r
-* ib_slvl_table_record_t\r
+* ib_slvl_table_get\r
*\r
* DESCRIPTION\r
-* IBA defined Sl to VL Mapping Table Record for SA Query. (15.2.5.4)\r
+* Get slvl table entry.\r
*\r
* SYNOPSIS\r
*/\r
-#include <complib/cl_packon.h>\r
-typedef struct _ib_slvl_table_record\r
+AL_INLINE uint8_t AL_API\r
+ib_slvl_table_get(\r
+IN const ib_slvl_table_t* p_slvl_tbl,\r
+IN const uint8_t sl_index )\r
{\r
- ib_net16_t lid; // for CA: lid of port, for switch lid of port 0\r
- uint8_t in_port_num; // reserved for CA's\r
- uint8_t out_port_num; // reserved for CA's\r
- uint32_t resv;\r
- ib_slvl_table_t slvl_tbl;\r
+ uint8_t idx = sl_index/2;\r
+ CL_ASSERT(sl_index <= 15);\r
\r
-} PACK_SUFFIX ib_slvl_table_record_t;\r
-#include <complib/cl_packoff.h>\r
-/************/\r
+ if (sl_index%2)\r
+ {\r
+ /* this is an odd sl. Need to return the ls bits. */\r
+ return ( p_slvl_tbl->vl_table[idx] & 0x0F );\r
+ }\r
+ else\r
+ {\r
+ /* this is an even sl. Need to return the ms bits. */\r
+ return ( (p_slvl_tbl->vl_table[idx] & 0xF0) >> 4 );\r
+ }\r
+}\r
+/*\r
+* PARAMETERS\r
+* p_slvl_tbl\r
+* [in] pointer to ib_slvl_table_t object.\r
+*\r
+* sl_index\r
+* [in] the sl index in the table whose value should be returned.\r
+*\r
+* RETURN VALUES\r
+* vl for the requested sl_index.\r
+*\r
+* NOTES\r
+*\r
+* SEE ALSO\r
+* ib_slvl_table_t\r
+*********/\r
\r
/****s* IBA Base: Types/ib_vl_arb_element_t\r
* NAME\r
{\r
uint8_t res_vl;\r
uint8_t weight;\r
-\r
} PACK_SUFFIX ib_vl_arb_element_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
*********/\r
\r
#define IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK 32\r
+\r
/****s* IBA Base: Types/ib_vl_arb_table_t\r
* NAME\r
* ib_vl_arb_table_t\r
typedef struct _ib_vl_arb_table\r
{\r
ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK];\r
-\r
} PACK_SUFFIX ib_vl_arb_table_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
uint8_t block_num;\r
uint32_t reserved;\r
ib_vl_arb_table_t vl_arb_tbl;\r
-\r
} PACK_SUFFIX ib_vl_arb_table_record_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
uint8_t hop_limit;\r
ib_gid_t src_gid;\r
ib_gid_t dest_gid;\r
-\r
} PACK_SUFFIX ib_grh_t;\r
#include <complib/cl_packoff.h>\r
/************/\r
* ib_member_get_sl_flow_hop\r
*\r
* DESCRIPTION\r
-* Get encoded sl, flow label, and hop limit
+* Get encoded sl, flow label, and hop limit\r
*\r
* SYNOPSIS\r
*/\r
/*\r
* PARAMETERS\r
* sl_flow_hop\r
-* [in] the sl, flow label, and hop limit of MC Group
+* [in] the sl, flow label, and hop limit of MC Group\r
*\r
* RETURN VALUES\r
* p_sl\r
* ib_member_set_sl_flow_hop\r
*\r
* DESCRIPTION\r
-* Set encoded sl, flow label, and hop limit
+* Set encoded sl, flow label, and hop limit\r
*\r
* SYNOPSIS\r
*/\r
*\r
* RETURN VALUES\r
* sl_flow_hop\r
-* [out] the encoded sl, flow label, and hop limit
+* [out] the encoded sl, flow label, and hop limit\r
*\r
* NOTES\r
*\r
#define IB_NOTICE_TYPE_INFO 0x04\r
#define IB_NOTICE_TYPE_EMPTY 0x7F\r
\r
-\r
#include <complib/cl_packon.h>\r
typedef struct _ib_mad_notice_attr\r
{\r
} PACK_SUFFIX ib_inform_info_t;\r
#include <complib/cl_packoff.h>\r
\r
-\r
/****f* IBA Base: Types/ib_inform_get_trap_num\r
* NAME\r
* ib_inform_get_trap_num\r
* Defines known Communication management class versions\r
*/\r
#define IB_MCLASS_CM_VER_2 2\r
-#define IB_MCLASS_CM_VER_1 1
+#define IB_MCLASS_CM_VER_1 1\r
\r
/*\r
* Defines the size of user available data in communication management MADs\r
#define IB_REJ_INVALID_FLOW_LBL CL_HTON16(32)\r
#define IB_REJ_INVALID_ALT_FLOW_LBL CL_HTON16(33)\r
\r
-#define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535)
-/******/
+#define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535)\r
+/******/\r
\r
/****d* Access Layer/ib_apr_status_t\r
* NAME\r
typedef struct _ib_ca* __ptr64 ib_ca_handle_t;\r
typedef struct _ib_pd* __ptr64 ib_pd_handle_t;\r
typedef struct _ib_mr* __ptr64 ib_mr_handle_t;\r
-typedef struct _mlnx_fmr* __ptr64 mlnx_fmr_handle_t;\r
typedef struct _ib_mw* __ptr64 ib_mw_handle_t;\r
typedef struct _ib_qp* __ptr64 ib_qp_handle_t;\r
typedef struct _ib_srq* __ptr64 ib_srq_handle_t;\r
typedef struct _ib_cq* __ptr64 ib_cq_handle_t;\r
typedef struct _ib_av* __ptr64 ib_av_handle_t;\r
typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t;\r
+typedef struct _mlnx_fmr* __ptr64 mlnx_fmr_handle_t;\r
+typedef struct _mlnx_fmr_pool* __ptr64 mlnx_fmr_pool_handle_t;\r
\r
\r
/****d* Access Layer/ib_api_status_t\r
/*\r
* VALUES\r
* IB_AE_SQ_ERROR\r
-* An error occurred when accessing the send queue of the QP or EEC.
+* An error occurred when accessing the send queue of the QP or EEC.\r
* This event is optional.\r
*\r
* IB_AE_SQ_DRAINED\r
* applicable, has received all acknowledgements for those messages.\r
*\r
* IB_AE_RQ_ERROR\r
-* An error occurred when accessing the receive queue of the QP or EEC.
+* An error occurred when accessing the receive queue of the QP or EEC.\r
* This event is optional.\r
*\r
* IB_AE_CQ_ERROR\r
* Indicates ability to check port number in address handles.\r
*\r
* change_primary_port\r
-* Indicates ability to change primary port for a QP or EEC during a
+* Indicates ability to change primary port for a QP or EEC during a\r
* SQD->RTS transition.\r
*\r
* modify_wr_depth\r
{\r
IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */\r
IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */\r
- IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */
- IB_QPT_UNRELIABLE_DGRM,
+ IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */\r
+ IB_QPT_UNRELIABLE_DGRM,\r
IB_QPT_QP0,\r
IB_QPT_QP1,\r
IB_QPT_RAW_IPV6,\r
* IB_QPT_UNRELIABLE_CONN\r
* Unreliable, connected queue pair.\r
*\r
-* IB_QPT_RELIABLE_DGRM
-* Reliable, datagram queue pair.
-*
+* IB_QPT_RELIABLE_DGRM\r
+* Reliable, datagram queue pair.\r
+*\r
* IB_QPT_UNRELIABLE_DGRM\r
* Unreliable, datagram queue pair.\r
*\r
* Send immediate data with the given request.\r
*\r
* IB_SEND_OPT_FENCE\r
-* The operation is fenced. Complete all pending send operations
-* before processing this request.
+* The operation is fenced. Complete all pending send operations\r
+* before processing this request.\r
*\r
* IB_SEND_OPT_SIGNALED\r
* If the queue pair is configured for signaled completion, then\r
* vendor specific restrictions on the size of send operation that may\r
* be performed as inline.\r
*\r
-*
+*\r
* IB_SEND_OPT_LOCAL\r
* Indicates that a sent MAD request should be given to the local VPD for\r
* processing. MADs sent using this option are not placed on the wire.\r
* This send option is only valid for MAD send operations.\r
*\r
-*
+*\r
* IB_SEND_OPT_VEND_MASK\r
* This mask indicates bits reserved in the send options that may be used\r
* by the verbs provider to indicate vendor specific options. Bits set\r
\r
} ud;\r
\r
- struct _send_rd
- {
- ib_net32_t remote_qp;
- ib_net32_t remote_qkey;
- ib_net32_t eecn;
-
- } rd;
-
+ struct _send_rd\r
+ {\r
+ ib_net32_t remote_qp;\r
+ ib_net32_t remote_qkey;\r
+ ib_net32_t eecn;\r
+\r
+ } rd;\r
+\r
struct _send_raw_ether\r
{\r
ib_net16_t dest_lid;\r
\r
} ud;\r
\r
- struct _wc_rd
- {
- ib_net32_t remote_eecn;
- ib_net32_t remote_qp;
- ib_net16_t remote_lid;
- uint8_t remote_sl;
- uint32_t free_cnt;
-
- } rd;
-
+ struct _wc_rd\r
+ {\r
+ ib_net32_t remote_eecn;\r
+ ib_net32_t remote_qp;\r
+ ib_net16_t remote_lid;\r
+ uint8_t remote_sl;\r
+ uint32_t free_cnt;\r
+\r
+ } rd;\r
+\r
struct _wc_raw_ipv6\r
{\r
ib_net16_t remote_lid;\r
* wc_type\r
* Indicates the type of work completion.\r
*\r
-*
+*\r
* length\r
* The total length of the data sent or received with the work request.\r
*\r
* Identifies the source queue pair of a received datagram.\r
*\r
* recv.ud.pkey_index\r
-* The pkey index for the source queue pair. This is valid only for
-* GSI type QP's.
+* The pkey index for the source queue pair. This is valid only for\r
+* GSI type QP's.\r
*\r
* recv.ud.remote_lid\r
* The source LID of the received datagram.\r
* recv.ud.path_bits\r
* path bits...\r
*\r
-* recv.rd.remote_eecn
-* The remote end-to-end context number that sent the received message.
-*
-* recv.rd.remote_qp
-* Identifies the source queue pair of a received message.
-*
-* recv.rd.remote_lid
-* The source LID of the received message.
-*
-* recv.rd.remote_sl
-* The service level used by the source of the received message.
-*
-* recv.rd.free_cnt
-* The number of available entries in the completion queue. Reliable
-* datagrams may complete out of order, so this field may be used to
-* determine the number of additional completions that may occur.
-*
+* recv.rd.remote_eecn\r
+* The remote end-to-end context number that sent the received message.\r
+*\r
+* recv.rd.remote_qp\r
+* Identifies the source queue pair of a received message.\r
+*\r
+* recv.rd.remote_lid\r
+* The source LID of the received message.\r
+*\r
+* recv.rd.remote_sl\r
+* The service level used by the source of the received message.\r
+*\r
+* recv.rd.free_cnt\r
+* The number of available entries in the completion queue. Reliable\r
+* datagrams may complete out of order, so this field may be used to\r
+* determine the number of additional completions that may occur.\r
+*\r
* recv.raw_ipv6.remote_lid\r
* The source LID of the received message.\r
*\r
* ib_access_t\r
*****/\r
\r
+\r
+/****s* Access Layer/mlnx_fmr_pool_create_t\r
+* NAME\r
+* mlnx_fmr_pool_create_t\r
+*\r
+* DESCRIPTION\r
+* Information required to create a Mellanox fast memory region pool.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _mlnx_fmr_pool_create\r
+{\r
+ int max_pages_per_fmr;\r
+ uint8_t page_size; /* really - page_shift, log2 of page_size */\r
+ enum ib_access_flags access_ctrl;\r
+ int pool_size;\r
+ int dirty_watermark;\r
+ void (*flush_function)(mlnx_fmr_pool_handle_t h_pool, void *arg);\r
+ void *flush_arg;\r
+ boolean_t cache;\r
+} mlnx_fmr_pool_create_t;\r
+/*\r
+* FIELDS\r
+* max_pages_per_fmr\r
+* max pages in a single FMR of the pool.\r
+*\r
+* pool_size\r
+* number of FMRs to pre-allocate in the pool.\r
+*\r
+* page_size\r
+* log2 of the page size (e.g. 12 for 4KB).\r
+*\r
+* access_ctrl\r
+* Access rights of the registered region.\r
+*\r
+* NOTES\r
+* This is a Mellanox specific extension to verbs.\r
+*\r
+* SEE ALSO\r
+* ib_access_t\r
+*****/\r
#endif\r
\r
/****s* Access Layer/ib_phys_range_t\r
* Indicates if there is an SNMP agent accessible through the port.\r
*\r
* IB_CA_MOD_IS_DEV_MGMT_SUPPORTED\r
-* Indicates if there is a device management agent accessible
-* through the port.
+* Indicates if there is a device management agent accessible\r
+* through the port.\r
*\r
* IB_CA_MOD_IS_VEND_SUPPORTED\r
-* Indicates if there is a vendor supported agent accessible
-* through the port.
+* Indicates if there is a vendor supported agent accessible\r
+* through the port.\r
*\r
* IB_CA_MOD_IS_SM\r
* Indicates if there is a subnet manager accessible through\r
* the port.\r
*\r
* IB_CA_MOD_IS_SM_DISABLED\r
-* Indicates if the port has been disabled for configuration by the
-* subnet manager.
+* Indicates if the port has been disabled for configuration by the\r
+* subnet manager.\r
*\r
* IB_CA_MOD_QKEY_CTR\r
-* Used to reset the qkey violation counter associated with the
-* port.
+* Used to reset the qkey violation counter associated with the\r
+* port.\r
*\r
* IB_CA_MOD_PKEY_CTR\r
-* Used to reset the pkey violation counter associated with the
-* port.
+* Used to reset the pkey violation counter associated with the\r
+* port.\r
*\r
* IB_CA_MOD_IS_NOTICE_SUPPORTED\r
* Indicates that this CA supports ability to generate Notices for\r
* trap messages. (only applicable to switches)\r
*\r
* IB_CA_MOD_IS_APM_SUPPORTED\r
-* Indicates that this port is capable of performing Automatic
-* Path Migration.
+* Indicates that this port is capable of performing Automatic\r
+* Path Migration.\r
*\r
* IB_CA_MOD_IS_SLMAP_SUPPORTED\r
* Indicates this port supports SLMAP capability.\r
\r
\r
#endif /* __IB_TYPES_H__ */\r
-
+\r
+\r
* IB resources provided by HCAs.\r
*********/\r
\r
-#define AL_INTERFACE_VERSION (10)\r
+#define AL_INTERFACE_VERSION (11)\r
\r
\r
\r
(*mlnx_pfn_destroy_fmr_t)(\r
IN mlnx_fmr_handle_t const h_fmr );\r
\r
+\r
+typedef ib_api_status_t\r
+(*mlnx_pfn_create_fmr_pool_t)(\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const mlnx_fmr_pool_create_t *p_fmr_pool_attr,\r
+ OUT mlnx_fmr_pool_handle_t* const ph_pool );\r
+\r
+\r
+typedef ib_api_status_t\r
+(*mlnx_pfn_destroy_fmr_pool_t)(\r
+ IN const mlnx_fmr_pool_handle_t h_pool );\r
+\r
+\r
+typedef ib_api_status_t\r
+(*mlnx_pfn_map_phys_fmr_pool_t)(\r
+ IN const mlnx_fmr_pool_handle_t h_pool ,\r
+ IN const uint64_t* const paddr_list,\r
+ IN const int list_len,\r
+ IN OUT uint64_t* const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey,\r
+ OUT mlnx_fmr_pool_el_t *p_fmr_el);\r
+\r
+typedef ib_api_status_t\r
+(*mlnx_pfn_unmap_fmr_pool_t)(\r
+ IN mlnx_fmr_pool_el_t p_fmr_el );\r
+\r
+typedef ib_api_status_t\r
+(*mlnx_pfn_flush_fmr_pool_t)(\r
+ IN const mlnx_fmr_pool_handle_t h_pool );\r
+\r
+\r
typedef ib_api_status_t\r
(*ib_pfn_create_mw_t)(\r
IN const ib_pd_handle_t h_pd,\r
OUT uint32_t* const p_rkey,\r
- OUT ib_mw_handle_t* const ph_mw );\r
+ OUT ib_mw_handle_t* const ph_mw );\r
\r
typedef ib_api_status_t\r
(*ib_pfn_query_mw_t)(\r
IN const ib_mw_handle_t h_mw,\r
- OUT ib_pd_handle_t* const ph_pd,\r
+ OUT ib_pd_handle_t* const ph_pd,\r
OUT uint32_t* const p_rkey );\r
\r
typedef ib_api_status_t\r
mlnx_pfn_map_phys_fmr_t map_phys_mlnx_fmr;\r
mlnx_pfn_unmap_fmr_t unmap_mlnx_fmr;\r
mlnx_pfn_destroy_fmr_t destroy_mlnx_fmr;\r
+ mlnx_pfn_create_fmr_pool_t create_mlnx_fmr_pool;\r
+ mlnx_pfn_destroy_fmr_pool_t destroy_mlnx_fmr_pool;\r
+ mlnx_pfn_map_phys_fmr_pool_t map_phys_mlnx_fmr_pool;\r
+ mlnx_pfn_unmap_fmr_pool_t unmap_mlnx_fmr_pool;\r
+ mlnx_pfn_flush_fmr_pool_t flush_mlnx_fmr_pool;\r
+ \r
ib_pfn_create_srq_t create_srq;\r
ib_pfn_query_srq_t query_srq;\r
ib_pfn_modify_srq_t modify_srq;\r
HKR,"Parameters\PnpInterface",%PNPBus%,%REG_DWORD%,1\r
HKR,"Parameters","DebugLevel",%REG_DWORD%,2\r
HKR,"Parameters","DebugFlags",%REG_DWORD%,0x00ffffff\r
+HKR,"Parameters","ModeFlags",%REG_DWORD%,0\r
\r
;\r
; == The NT EventLog entries are the same for all SCSI miniports. ==\r
#define SRP_EXTENSION_ID_LENGTH 16 /* Service name extension ID length */\r
\r
#define SRP_MIN_IU_SIZE 64\r
-#define SRP_MAX_IU_SIZE 340\r
+#define SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER 257 /* it was 16 */\r
+#define SRP_MAX_IU_SIZE (SRP_MIN_IU_SIZE + 20 + 16*SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER)\r
\r
#define SRP_MIN_INI_TO_TGT_IU 64 // Minimum initiator message size\r
#define SRP_MIN_TGT_TO_INI_IU 56 // Minimum target message size\r
\r
p_connection->request_limit =\r
MIN( get_srp_login_response_request_limit_delta( p_srp_login_rsp ), SRP_DEFAULT_RECV_Q_DEPTH );\r
+\r
+ p_connection->request_threashold = 2;\r
+#if DBG\r
+ p_srp_session->x_req_limit = p_connection->request_limit;\r
+#endif\r
+ SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG,\r
+ ( "request_limit_delta %d, SRP_DEFAULT_RECV_Q_DEPTH %d, request_threashold %d\n", \r
+ get_srp_login_response_request_limit_delta( p_srp_login_rsp ), \r
+ SRP_DEFAULT_RECV_Q_DEPTH, p_connection->request_threashold ));\r
+\r
p_connection->send_queue_depth = p_connection->request_limit;\r
p_connection->recv_queue_depth = p_connection->request_limit;\r
p_connection->init_to_targ_iu_sz = get_srp_login_response_max_init_to_targ_iu( p_srp_login_rsp );\r
p_connection->targ_to_init_iu_sz,\r
p_connection->max_scatter_gather_entries) );\r
\r
+ /* will be used in srp_find_adapter to calculate NumberOfPhysicalBreaks */\r
+ p_srp_session->p_hba->max_sg = p_connection->max_scatter_gather_entries;\r
+\r
u.cm_mra.svc_timeout = 0x08;\r
u.cm_mra.p_mra_pdata = NULL;\r
u.cm_mra.mra_length = 0;\r
p_connection->h_qp );\r
if ( status != IB_SUCCESS )\r
{\r
- cl_free( p_connection->p_wc_array );\r
- p_connection->p_wc_array = NULL;\r
- p_connection->p_wc_free_list = NULL;\r
-rej:\r
- p_connection->state = SRP_CONNECT_FAILURE;\r
- cl_memclr( &u.cm_rej, sizeof(u.cm_rej) );\r
- u.cm_rej.rej_status = IB_REJ_INSUF_RESOURCES;\r
- p_ifc->cm_rej( p_cm_reply->h_cm_rep, &u.cm_rej );\r
- goto exit;\r
+ goto err_init_desc;\r
}\r
\r
u.cm_rtu.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
("Cannot Send RTU. Status = %d\n", status) );\r
- p_connection->state = SRP_CONNECT_FAILURE;\r
- goto exit;\r
+ goto err_send_rtu;\r
}\r
\r
p_connection->state = SRP_CONNECTED;\r
("ib_rearm_cq() for send cq failed!, status 0x%x", status) );\r
\r
// TODO: Kill session and inform port driver link down storportnotification\r
- p_connection->state = SRP_CONNECT_FAILURE;\r
- goto exit;\r
+ goto err_send_rtu;\r
}\r
\r
status = p_ifc->rearm_cq( p_connection->h_recv_cq, FALSE );\r
("ib_rearm_cq() for recv failed!, status 0x%x", status) );\r
\r
// TODO: Kill session and inform port driver link down storportnotification\r
- p_connection->state = SRP_CONNECT_FAILURE;\r
+ goto err_send_rtu;\r
}\r
+ goto exit;\r
\r
+err_send_rtu: \r
+ // the rest will be cleaned up in srp_session_login\r
+\r
+err_init_desc:\r
+ cl_free( p_connection->p_wc_array );\r
+ p_connection->p_wc_array = NULL;\r
+ p_connection->p_wc_free_list = NULL;\r
+\r
+rej:\r
+ p_connection->state = SRP_CONNECT_FAILURE;\r
+ cl_memclr( &u.cm_rej, sizeof(u.cm_rej) );\r
+ u.cm_rej.rej_status = IB_REJ_INSUF_RESOURCES;\r
+ p_ifc->cm_rej( p_cm_reply->h_cm_rep, &u.cm_rej );\r
\r
exit:\r
cl_status = cl_event_signal( &p_connection->conn_req_event );\r
status = IB_ERROR;\r
goto exit;\r
}\r
- p_connection->req_max_iu_msg_size = ( p_connection->ioc_max_send_msg_size >= SRP_MAX_IU_SIZE )? SRP_MAX_IU_SIZE: p_connection->ioc_max_send_msg_size;\r
+ p_connection->req_max_iu_msg_size = ( p_connection->ioc_max_send_msg_size >= SRP_MAX_IU_SIZE ) ? SRP_MAX_IU_SIZE: p_connection->ioc_max_send_msg_size;\r
+ SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_ERROR, \r
+ ( "(init_to_targ_iu_sz requested) req_max_iu_msg_size %d, (from profile) ioc_max_send_msg_size %d\n", \r
+ p_connection->req_max_iu_msg_size, p_connection->ioc_max_send_msg_size ));\r
/*\r
Build SRP Login request\r
*/\r
cm_req.qp_type = IB_QPT_RELIABLE_CONN;\r
cm_req.h_qp = p_connection->h_qp;\r
\r
- /* The maximum number of outstanding RDMA read/atomic operations. */\r
+ /* The maximum number of outstanding RDMA read/atomic operations. */\r
status = srp_get_responder_resources( p_hca, &cm_req.resp_res );\r
if ( status != IB_SUCCESS )\r
{\r
\r
atomic32_t tag;\r
atomic32_t request_limit;\r
+ int32_t request_threashold;\r
uint32_t init_to_targ_iu_sz;\r
uint32_t targ_to_init_iu_sz;\r
\r
// Final address is of the form 0b00ttttttllllllll\r
#define BUILD_SCSI_ADDRESS(lun) ((uint64_t)lun << 48)\r
\r
+#define SRP_REQUEST_LIMIT_THRESHOLD 2\r
+\r
+static ib_api_status_t\r
+__srp_map_fmr(\r
+ IN PVOID p_dev_ext,\r
+ IN PSTOR_SCATTER_GATHER_LIST p_scatter_gather_list,\r
+ IN srp_send_descriptor_t *p_send_descriptor,\r
+ IN OUT srp_memory_descriptor_t *p_memory_descriptor)\r
+{\r
+ srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba;\r
+ PSTOR_SCATTER_GATHER_ELEMENT p_sg_element;\r
+ uint32_t total_len = 0;\r
+ uint32_t i,j,list_len = 0;\r
+ uint64_t *p_addr_list;\r
+ uint64_t vaddr=0;\r
+ ib_api_status_t status;\r
+ srp_hca_t hca;\r
+ uint64_t fmr_page_mask;\r
+ net32_t lkey;\r
+ net32_t rkey;\r
+ srp_session_t *p_srp_session;\r
+ mlnx_fmr_pool_el_t p_fmr_el;\r
+\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+\r
+ if (g_srp_mode_flags & SRP_MODE_NO_FMR_POOL)\r
+ return IB_UNSUPPORTED;\r
+\r
+ p_srp_session = p_hba->session_list[p_send_descriptor->p_srb->TargetId];\r
+ if ( p_srp_session == NULL )\r
+ return IB_INVALID_STATE;\r
+ \r
+ hca = p_srp_session->hca;\r
+ fmr_page_mask = ~(hca.fmr_page_size-1);\r
+ \r
+ for ( i = 0, p_sg_element = p_scatter_gather_list->List;\r
+ i < p_scatter_gather_list->NumberOfElements;\r
+ i++, p_sg_element++ )\r
+ {\r
+ uint32_t dma_len = p_sg_element->Length;\r
+ \r
+ if (p_sg_element->PhysicalAddress.QuadPart & ~fmr_page_mask) {\r
+ if (i > 0)\r
+ { // buffer start not from the beginning of the page is allowed only for the first SG element\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Unaligned address at the begin of the list\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+ }\r
+\r
+ if ((p_sg_element->PhysicalAddress.QuadPart + dma_len) & ~fmr_page_mask) {\r
+ if (i < (uint32_t)p_scatter_gather_list->NumberOfElements -1)\r
+ { // buffer end not on the beginning of the page is allowed only for the last SG element\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Unaligned address at the end of the list\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+ }\r
+\r
+ total_len += p_sg_element->Length;\r
+ list_len += (p_sg_element->Length + (hca.fmr_page_size-1)) >> hca.fmr_page_shift;\r
+ }\r
+\r
+ \r
+ p_addr_list = cl_zalloc(sizeof(uint64_t)*list_len);\r
+ if(!p_addr_list)\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,("Failed to allocate page list\n"));\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+ \r
+ list_len = 0;\r
+ for ( i = 0, p_sg_element = p_scatter_gather_list->List;\r
+ i < p_scatter_gather_list->NumberOfElements;\r
+ i++, p_sg_element++ )\r
+ {\r
+ uint32_t dma_len = p_sg_element->Length;\r
+ for( j = 0; j < dma_len; j+=PAGE_SIZE)\r
+ {\r
+ p_addr_list[list_len++] = (p_sg_element->PhysicalAddress.QuadPart & fmr_page_mask) + j;\r
+ }\r
+ }\r
+\r
+ p_send_descriptor->p_fmr_el = NULL;\r
+ status = p_hba->ifc.map_phys_mlnx_fmr_pool\r
+ (hca.h_fmr_pool, p_addr_list, list_len, &vaddr, &lkey, &rkey, &p_fmr_el );\r
+\r
+ cl_free( p_addr_list );\r
+ \r
+ if(status != IB_SUCCESS)\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,("Failed to map fmr\n"));\r
+ return status;\r
+ }\r
+\r
+ p_send_descriptor->p_fmr_el = p_fmr_el;\r
+ p_sg_element = p_scatter_gather_list->List;\r
+\r
+ p_memory_descriptor->virtual_address = cl_hton64( p_sg_element->PhysicalAddress.QuadPart & ~fmr_page_mask);\r
+ p_memory_descriptor->memory_handle = rkey;\r
+ p_memory_descriptor->data_length = cl_hton32( total_len);\r
+\r
+#if DBG\r
+ /* statistics */\r
+ p_srp_session->x_pkt_fmr++;\r
+#endif\r
+\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
\r
static inline\r
void\r
static inline\r
void\r
__srp_process_session_send_completions(\r
- IN srp_session_t *p_session )\r
+ IN srp_session_t *p_srp_session )\r
{\r
ib_api_status_t status;\r
ib_wc_t *p_wc_done_list = NULL;\r
\r
SRP_ENTER( SRP_DBG_DATA );\r
\r
- cl_obj_lock( &p_session->obj );\r
+ cl_obj_lock( &p_srp_session->obj );\r
\r
- if ( p_session->connection.state != SRP_CONNECTED )\r
+ if ( p_srp_session->connection.state != SRP_CONNECTED )\r
{\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
SRP_EXIT( SRP_DBG_DATA );\r
return;\r
}\r
\r
- status = p_session->p_hba->ifc.poll_cq(\r
- p_session->connection.h_send_cq,\r
- &p_session->connection.p_wc_free_list,\r
+ status = p_srp_session->p_hba->ifc.poll_cq(\r
+ p_srp_session->connection.h_send_cq,\r
+ &p_srp_session->connection.p_wc_free_list,\r
&p_wc_done_list );\r
if ( status != IB_SUCCESS )\r
{\r
("ib_poll_cq() failed!, status 0x%x\n", status) );\r
\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
return;\r
}\r
\r
- cl_obj_ref( &p_session->obj );\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_ref( &p_srp_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
\r
while ( (p_wc = p_wc_done_list) != NULL )\r
{\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,\r
("Send Completion Status %s Vendore Status = 0x%x, \n",\r
- p_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
+ p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
(int)p_wc->vendor_specific));\r
\r
SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
("Send Completion Status %s Vendore Status = 0x%x, \n",\r
- p_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
+ p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
(int)p_wc->vendor_specific));\r
\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
}\r
\r
/* Put onto head of free list */\r
- cl_obj_lock( &p_session->obj );\r
- p_wc->p_next = p_session->connection.p_wc_free_list;\r
- p_session->connection.p_wc_free_list = p_wc;\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_lock( &p_srp_session->obj );\r
+ p_wc->p_next = p_srp_session->connection.p_wc_free_list;\r
+ p_srp_session->connection.p_wc_free_list = p_wc;\r
+ cl_obj_unlock( &p_srp_session->obj );\r
\r
/* Get next completion */\r
p_wc = p_wc_done_list;\r
}\r
\r
/* Re-arm the CQ for more completions */\r
- status = p_session->p_hba->ifc.rearm_cq(\r
- p_session->connection.h_send_cq, FALSE );\r
+ status = p_srp_session->p_hba->ifc.rearm_cq(\r
+ p_srp_session->connection.h_send_cq, FALSE );\r
if ( status != IB_SUCCESS)\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
}\r
\r
- cl_obj_deref( &p_session->obj );\r
+ cl_obj_deref( &p_srp_session->obj );\r
\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
IN const ib_cq_handle_t h_cq,\r
IN void *p_context )\r
{\r
- srp_session_t *p_session = (srp_session_t *)p_context;\r
+ srp_session_t *p_srp_session = (srp_session_t *)p_context;\r
\r
SRP_ENTER( SRP_DBG_DATA );\r
\r
UNUSED_PARAM( h_cq );\r
\r
- __srp_process_session_send_completions( p_session );\r
+ __srp_process_session_send_completions( p_srp_session );\r
+\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
+\r
+\r
+static inline ib_api_status_t\r
+__srp_clean_send_descriptor( /* unmap and clear the FMR element attached to a send descriptor, if any */\r
+ IN srp_send_descriptor_t *p_send_descriptor,\r
+ IN srp_session_t *p_srp_session )\r
+{\r
+ ib_api_status_t status = IB_SUCCESS; /* IB_SUCCESS when there is nothing to unmap */\r
+\r
+ if(p_srp_session && p_send_descriptor && p_send_descriptor->p_fmr_el)\r
+ {\r
+ status = p_srp_session->p_hba->ifc.unmap_mlnx_fmr_pool(p_send_descriptor->p_fmr_el); /* return the FMR to the pool */\r
+ p_send_descriptor->p_fmr_el = NULL; /* guard against a double unmap */\r
+ }\r
+ return status;\r
+}\r
+\r
+void /* post a prepared SRB to the session; on failure, unmap its FMR and complete the SRB back to StorPort */\r
+__srp_post_io_request(\r
+ IN PVOID p_dev_ext,\r
+ IN OUT PSCSI_REQUEST_BLOCK p_srb, \r
+ srp_session_t *p_srp_session )\r
+{\r
+ ib_api_status_t status;\r
+ srp_send_descriptor_t *p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension;\r
+\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+\r
+ status = srp_post_send_descriptor( &p_srp_session->descriptors,\r
+ p_send_descriptor,\r
+ p_srp_session );\r
+\r
+ if ( status == IB_SUCCESS )\r
+ {\r
+ cl_atomic_dec( &p_srp_session->connection.request_limit ); /* one request-limit credit consumed */\r
+#if DBG \r
+ { /* statistics: track sent_descriptors queue depth */\r
+ uint32_t size = (uint32_t)cl_qlist_count(&p_srp_session->descriptors.sent_descriptors);\r
+ p_srp_session->x_sent_num++;\r
+ p_srp_session->x_sent_total += size;\r
+ if ( p_srp_session->x_sent_max < size )\r
+ p_srp_session->x_sent_max = size;\r
+ }\r
+#endif \r
+ goto exit;\r
+ }\r
+ else \r
+ {\r
+ p_srb->SrbStatus = SRB_STATUS_NO_HBA; /* post failed - fail the SRB back to the port driver */\r
+\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), Path = 0x%x, "\r
+ "Target = 0x%x, Lun = 0x%x, tag 0x%I64xn",\r
+ g_srb_status_name[p_srb->SrbStatus],\r
+ p_srb->SrbStatus,\r
+ g_srb_function_name[p_srb->Function],\r
+ p_srb->Function,\r
+ p_srb->PathId,\r
+ p_srb->TargetId,\r
+ p_srb->Lun,\r
+ get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );\r
+\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session ); /* release the FMR before completing */\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+ \r
+ StorPortNotification( RequestComplete, p_dev_ext, p_srb );\r
+ }\r
+\r
+exit:\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
+\r
+static \r
+void /* drain the pending-descriptor list while request-limit credits remain above the threshold */\r
+__srp_repost_io_request(\r
+ IN srp_session_t *p_srp_session )\r
+{\r
+ srp_hba_t *p_hba;\r
+ srp_send_descriptor_t *p_send_descriptor = NULL;\r
+ srp_descriptors_t *p_descriptors = &p_srp_session->descriptors;\r
+\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+\r
+ if ( !cl_qlist_count(&p_descriptors->pending_descriptors) ||\r
+ (p_srp_session->connection.request_limit <= p_srp_session->connection.request_threashold) )\r
+ goto exit; /* nothing pending, or no credits to spend */\r
+\r
+#if DBG \r
+ { /* statistics: track pending_descriptors queue depth */\r
+ uint32_t size = (uint32_t)cl_qlist_count(&p_descriptors->pending_descriptors);\r
+ p_srp_session->x_pend_num++;\r
+ p_srp_session->x_pend_total += size;\r
+ if ( p_srp_session->x_pend_max < size )\r
+ p_srp_session->x_pend_max = size;\r
+ }\r
+#endif \r
+\r
+ /* If this loop releases the last pending request for sending, there is a race\r
+ between it and StorPort, which may call srp_post_io_request at just that moment.\r
+ In the "worst" case this changes the order of the two postings. The flag\r
+ 'repost_is_on' is intended to prevent this case. */\r
+ cl_atomic_inc( &p_srp_session->repost_is_on );\r
+\r
+ while (p_srp_session->connection.request_limit > p_srp_session->connection.request_threashold)\r
+ {\r
+ cl_list_item_t *p_list_item;\r
\r
+ /* extract a pending descriptor, if any */\r
+ cl_spinlock_acquire ( &p_descriptors->pending_list_lock );\r
+ p_list_item = cl_qlist_remove_head( &p_descriptors->pending_descriptors );\r
+ if ( p_list_item == cl_qlist_end( &p_descriptors->pending_descriptors ) )\r
+ {\r
+ cl_spinlock_release ( &p_descriptors->pending_list_lock ); /* list drained - stop */\r
+ break;\r
+ }\r
+ cl_spinlock_release ( &p_descriptors->pending_list_lock );\r
+\r
+ /* post the request */\r
+ p_hba = p_srp_session->p_hba;\r
+ p_send_descriptor = PARENT_STRUCT(p_list_item, srp_send_descriptor_t,list_item);\r
+ __srp_post_io_request( p_hba->p_ext, p_send_descriptor->p_srb, p_srp_session );\r
+ }\r
+\r
+ cl_atomic_dec( &p_srp_session->repost_is_on );\r
+\r
+exit:\r
 SRP_EXIT( SRP_DBG_DATA );\r
 }\r
+ \r
+static inline void /* apply the request-limit delta carried in an SRP response, and record DBG statistics */\r
+__srp_fix_request_limit(\r
+ IN srp_session_t *p_srp_session,\r
+ IN srp_rsp_t *p_srp_rsp )\r
+{\r
+ int32_t rld = get_srp_response_request_limit_delta( p_srp_rsp );\r
+ cl_atomic_add( &p_srp_session->connection.request_limit, rld ); /* credits granted (or revoked) by the target */\r
+#if DBG \r
+ /* statistics: track min/max/average of the delta */\r
+ p_srp_session->x_rld_num++;\r
+ p_srp_session->x_rld_total += rld;\r
+ if ( p_srp_session->x_rld_max < rld )\r
+ p_srp_session->x_rld_max = rld;\r
+ if ( p_srp_session->x_rld_min > rld )\r
+ p_srp_session->x_rld_min = rld;\r
+#endif \r
+}\r
\r
static inline\r
ib_api_status_t\r
__srp_process_recv_completion(\r
IN srp_recv_descriptor_t *p_recv_descriptor,\r
- IN srp_session_t *p_session )\r
+ IN srp_session_t *p_srp_session )\r
{\r
ib_api_status_t status = IB_SUCCESS;\r
srp_rsp_t *p_srp_rsp;\r
response_status = get_srp_response_status( p_srp_rsp );\r
\r
p_send_descriptor = srp_find_matching_send_descriptor(\r
- &p_session->descriptors,\r
+ &p_srp_session->descriptors,\r
get_srp_response_tag( (srp_rsp_t *)p_recv_descriptor->p_data_segment ) );\r
if ( p_send_descriptor == NULL )\r
{\r
/* Repost the recv descriptor */\r
- status = p_session->p_hba->ifc.post_recv(\r
- p_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
+ status = p_srp_session->p_hba->ifc.post_recv(\r
+ p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
if ( status != IB_SUCCESS )\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
}\r
\r
- cl_atomic_add( &p_session->connection.request_limit, get_srp_response_request_limit_delta( p_srp_rsp ) );\r
+ __srp_fix_request_limit( p_srp_session, p_srp_rsp );\r
+ __srp_repost_io_request( p_srp_session );\r
\r
SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,\r
("Matching Send Descriptor Not Found.\n") );\r
if ( get_srp_tsk_mgmt_task_management_flags( p_srp_tsk_mgmt ) == TMF_ABORT_TASK )\r
{\r
/* Repost the recv descriptor */\r
- status = p_session->p_hba->ifc.post_recv(\r
- p_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
+ status = p_srp_session->p_hba->ifc.post_recv(\r
+ p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
if ( status != IB_SUCCESS )\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
// TODO: Kill session and inform port driver link down storportnotification\r
}\r
\r
- cl_atomic_add( &p_session->connection.request_limit, get_srp_response_request_limit_delta( p_srp_rsp ) );\r
+ __srp_fix_request_limit( p_srp_session, p_srp_rsp );\r
+ __srp_repost_io_request( p_srp_session );\r
+\r
__srp_dump_srb_info( p_send_descriptor);\r
\r
- StorPortNotification( RequestComplete, p_session->p_hba->p_ext, p_send_descriptor->p_srb );\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+\r
+ StorPortNotification( RequestComplete, p_srp_session->p_hba->p_ext, p_send_descriptor->p_srb );\r
}\r
+\r
+\r
+ \r
break;\r
}\r
\r
/* initiate session recovery */\r
SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,\r
("Sense Data indicates FC link connectivity has been lost.\n") );\r
- StorPortPauseDevice( p_session->p_hba->p_ext,\r
+ StorPortPauseDevice( p_srp_session->p_hba->p_ext,\r
p_send_descriptor->p_srb->PathId,\r
p_send_descriptor->p_srb->TargetId,\r
p_send_descriptor->p_srb->Lun,\r
p_send_descriptor->p_srb->DataBuffer ).QuadPart) );\r
\r
/* Repost the recv descriptor */\r
- status = p_session->p_hba->ifc.post_recv(\r
- p_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
+ status = p_srp_session->p_hba->ifc.post_recv(\r
+ p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );\r
if ( status != IB_SUCCESS )\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
// TODO: Kill session and inform port driver link down storportnotification\r
}\r
\r
- cl_atomic_add( &p_session->connection.request_limit, get_srp_response_request_limit_delta( p_srp_rsp ) );\r
+ __srp_fix_request_limit( p_srp_session, p_srp_rsp );\r
+ __srp_repost_io_request( p_srp_session );\r
+\r
__srp_dump_srb_info( p_send_descriptor);\r
\r
- StorPortNotification( RequestComplete, p_session->p_hba->p_ext, p_send_descriptor->p_srb );\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+ \r
+ StorPortNotification( RequestComplete, p_srp_session->p_hba->p_ext, p_send_descriptor->p_srb );\r
break;\r
\r
case SRP_LOGIN_REQ:\r
static inline\r
void\r
__srp_process_session_recv_completions(\r
- IN srp_session_t *p_session )\r
+ IN srp_session_t *p_srp_session )\r
{\r
ib_api_status_t status;\r
ib_wc_t *p_wc_done_list;\r
\r
SRP_ENTER( SRP_DBG_DATA );\r
\r
- cl_obj_lock( &p_session->obj );\r
+ cl_obj_lock( &p_srp_session->obj );\r
\r
- if ( p_session->connection.state != SRP_CONNECTED )\r
+ if ( p_srp_session->connection.state != SRP_CONNECTED )\r
{\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
SRP_EXIT( SRP_DBG_DATA );\r
return;\r
}\r
\r
- status = p_session->p_hba->ifc.poll_cq(\r
- p_session->connection.h_recv_cq,\r
- &p_session->connection.p_wc_free_list,\r
+ status = p_srp_session->p_hba->ifc.poll_cq(\r
+ p_srp_session->connection.h_recv_cq,\r
+ &p_srp_session->connection.p_wc_free_list,\r
&p_wc_done_list );\r
if ( status != IB_SUCCESS )\r
{\r
\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
SRP_EXIT( SRP_DBG_DATA );\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
return;\r
}\r
\r
- cl_obj_ref( &p_session->obj );\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_ref( &p_srp_session->obj );\r
+ cl_obj_unlock( &p_srp_session->obj );\r
\r
while ( (p_wc = p_wc_done_list) != NULL )\r
{\r
\r
if ( p_wc->status == IB_WCS_SUCCESS )\r
{\r
- status = __srp_process_recv_completion( p_recv_descriptor, p_session );\r
+ status = __srp_process_recv_completion( p_recv_descriptor, p_srp_session );\r
if ( status != IB_SUCCESS )\r
{\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
("Recv Completion with Error Status %s (vendore specific %#x)\n",\r
- p_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
+ p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),\r
(int)p_wc->vendor_specific) );\r
}\r
else\r
{\r
SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,\r
("Recv Completion Flushed in Error Status: %s\n",\r
- p_session->p_hba->ifc.get_wc_status_str( p_wc->status )));\r
+ p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status )));\r
\r
}\r
}\r
\r
/* Put onto head of free list */\r
- cl_obj_lock( &p_session->obj );\r
- p_wc->p_next = p_session->connection.p_wc_free_list;\r
- p_session->connection.p_wc_free_list = p_wc;\r
- cl_obj_unlock( &p_session->obj );\r
+ cl_obj_lock( &p_srp_session->obj );\r
+ p_wc->p_next = p_srp_session->connection.p_wc_free_list;\r
+ p_srp_session->connection.p_wc_free_list = p_wc;\r
+ cl_obj_unlock( &p_srp_session->obj );\r
\r
/* Get next completion */\r
p_wc = p_wc_done_list;\r
}\r
\r
/* Re-arm the CQ for more completions */\r
- status = p_session->p_hba->ifc.rearm_cq(\r
- p_session->connection.h_recv_cq, FALSE );\r
+ status = p_srp_session->p_hba->ifc.rearm_cq(\r
+ p_srp_session->connection.h_recv_cq, FALSE );\r
if ( status != IB_SUCCESS)\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
// TODO: Kill session and inform port driver link down scsiportnotification\r
}\r
\r
- cl_obj_deref( &p_session->obj );\r
+ cl_obj_deref( &p_srp_session->obj );\r
\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
IN const ib_cq_handle_t h_cq,\r
IN void *p_context )\r
{\r
- srp_session_t *p_session = (srp_session_t *)p_context;\r
+ srp_session_t *p_srp_session = (srp_session_t *)p_context;\r
\r
SRP_ENTER( SRP_DBG_DATA );\r
\r
UNUSED_PARAM( h_cq );\r
\r
- __srp_process_session_recv_completions( p_session );\r
+ __srp_process_session_recv_completions( p_srp_session );\r
\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
ULONG scsi_direction = p_srb->SrbFlags & ( SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT );\r
DATA_BUFFER_DESCRIPTOR_FORMAT format = p_srp_conn_info->descriptor_format & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS;\r
ULONG length;\r
+#if DBG\r
+ srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba;\r
+ srp_session_t *p_srp_session;\r
+#endif \r
+ \r
SRP_ENTER( SRP_DBG_DATA );\r
\r
+#if DBG\r
+ /* statistics */\r
+ p_srp_session = p_hba->session_list[p_send_descriptor->p_srb->TargetId];\r
+ p_srp_session->x_pkt_built++;\r
+#endif \r
+\r
SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
("Sending I/O to Path = 0x%x, Target = 0x%x, Lun = 0x%x\n",\r
p_srb->PathId,\r
p_memory_descriptor = get_srp_command_data_out_buffer_desc( p_srp_cmd );\r
}\r
\r
+#if DBG\r
+ { /* print max SG list, gotten from the StorPort */\r
+ static ULONG s_sg_max = 0;\r
+ if ( p_scatter_gather_list && s_sg_max < p_scatter_gather_list->NumberOfElements )\r
+ {\r
+ uint32_t total = 0;\r
+ PSTOR_SCATTER_GATHER_ELEMENT p_sg_el;\r
+ for ( i = 0, p_sg_el = p_scatter_gather_list->List;\r
+ i < scatter_gather_count; i++, p_sg_el++ )\r
+ {\r
+ total += p_sg_el->Length;\r
+ }\r
+ s_sg_max = p_scatter_gather_list->NumberOfElements;\r
+ SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA, \r
+ ( "StorPort sg_cnt %d, total %#x, max sg_cnt %d, direction %s\n",\r
+ s_sg_max, total, p_srp_conn_info->max_scatter_gather_entries,\r
+ ( scsi_direction == SRB_FLAGS_DATA_IN ) ? "IN" : "OUT" ));\r
+ }\r
+ }\r
+#endif \r
+\r
if ( p_memory_descriptor != NULL )\r
{\r
PSTOR_SCATTER_GATHER_ELEMENT p_sg_element;\r
p_table_descriptor->descriptor.virtual_address = cl_hton64( buf_addr );\r
p_table_descriptor->descriptor.memory_handle = p_srp_conn_info->rkey;\r
\r
- p_table_descriptor->descriptor.data_length =\r
- cl_hton32( sizeof(srp_memory_descriptor_t) *\r
- p_scatter_gather_list->NumberOfElements );\r
- \r
- for ( i = 0, totalLength = 0, p_sg_element = p_scatter_gather_list->List;\r
- i < p_scatter_gather_list->NumberOfElements;\r
- i++, p_memory_descriptor++, p_sg_element++ )\r
+ if((p_scatter_gather_list->NumberOfElements > 1) && !__srp_map_fmr(p_dev_ext,p_scatter_gather_list,p_send_descriptor,p_memory_descriptor))\r
{\r
- buf_addr = p_srp_conn_info->vaddr + p_sg_element->PhysicalAddress.QuadPart;\r
- \r
- p_memory_descriptor->virtual_address = cl_hton64( buf_addr );\r
- p_memory_descriptor->memory_handle = p_srp_conn_info->rkey;\r
- p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length );\r
- totalLength += p_sg_element->Length;\r
+ /* Set the descriptor list len */\r
+ p_table_descriptor->descriptor.data_length =\r
+ cl_hton32( sizeof(srp_memory_descriptor_t) *1);\r
+ p_table_descriptor->total_length = p_memory_descriptor->data_length;\r
+ if ( scsi_direction == SRB_FLAGS_DATA_IN )\r
+ set_srp_command_data_in_buffer_desc_count( p_srp_cmd, 1 );\r
+ else if ( scsi_direction == SRB_FLAGS_DATA_OUT )\r
+ set_srp_command_data_out_buffer_desc_count( p_srp_cmd, 1 );\r
+\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("virtual_address[%d] = 0x%I64x.\n",\r
+ 0, cl_ntoh64(p_memory_descriptor->virtual_address) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("memory_handle[%d] = 0x%x.\n",\r
+ 0, cl_ntoh32( p_memory_descriptor->memory_handle) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("data_length[%d] = %d.\n",\r
+ 0, cl_ntoh32( p_memory_descriptor->data_length) ) );\r
+ }\r
+ else\r
+ {\r
+ CL_ASSERT( scatter_gather_count ==\r
+ p_scatter_gather_list->NumberOfElements );\r
+\r
+ /* Set the descriptor list len */\r
+ p_table_descriptor->descriptor.data_length =\r
+ cl_hton32( sizeof(srp_memory_descriptor_t) *\r
+ p_scatter_gather_list->NumberOfElements );\r
+\r
+ for ( i = 0, totalLength = 0, p_sg_element = p_scatter_gather_list->List;\r
+ i < scatter_gather_count;\r
+ i++, p_memory_descriptor++, p_sg_element++ )\r
+ {\r
+ buf_addr = p_srp_conn_info->vaddr + p_sg_element->PhysicalAddress.QuadPart;\r
+ \r
+ p_memory_descriptor->virtual_address = cl_hton64( buf_addr );\r
+ p_memory_descriptor->memory_handle = p_srp_conn_info->rkey;\r
+ p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length );\r
+ totalLength += p_sg_element->Length;\r
+ \r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("virtual_address[%d] = 0x%I64x.\n",\r
+ i, cl_ntoh64(p_memory_descriptor->virtual_address) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("memory_handle[%d] = 0x%x.\n",\r
+ i, cl_ntoh32( p_memory_descriptor->memory_handle) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("data_length[%d] = %d.\n",\r
+ i, cl_ntoh32( p_memory_descriptor->data_length) ) );\r
+ }\r
+ p_table_descriptor->total_length = cl_hton32( totalLength );\r
}\r
- p_table_descriptor->total_length = cl_hton32( totalLength );\r
}\r
else if ( format == DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR )\r
{\r
CL_ASSERT( scatter_gather_count ==\r
p_scatter_gather_list->NumberOfElements );\r
- for ( i = 0, p_sg_element = p_scatter_gather_list->List;\r
- i < scatter_gather_count; i++, p_memory_descriptor++, p_sg_element++ )\r
+ if((p_scatter_gather_list->NumberOfElements > 1) && !__srp_map_fmr(p_dev_ext,p_scatter_gather_list,p_send_descriptor,p_memory_descriptor))\r
{\r
- buf_addr = p_srp_conn_info->vaddr + p_sg_element->PhysicalAddress.QuadPart;\r
- p_memory_descriptor->virtual_address = cl_hton64( buf_addr );\r
- p_memory_descriptor->memory_handle = p_srp_conn_info->rkey;\r
- p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length );\r
-\r
- SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
- ("virtual_address[%d] = 0x%I64x.\n",\r
- i, cl_ntoh64(p_memory_descriptor->virtual_address) ) );\r
- SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
- ("memory_handle[%d] = 0x%x.\n",\r
- i, cl_ntoh32( p_memory_descriptor->memory_handle) ) );\r
- SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
- ("data_length[%d] = %d.\n",\r
- i, cl_ntoh32( p_memory_descriptor->data_length) ) );\r
+ if ( scsi_direction == SRB_FLAGS_DATA_IN )\r
+ set_srp_command_data_in_buffer_desc_count( p_srp_cmd, 1 );\r
+ else if ( scsi_direction == SRB_FLAGS_DATA_OUT )\r
+ set_srp_command_data_out_buffer_desc_count( p_srp_cmd, 1 );\r
+ }\r
+ else\r
+ {\r
+ for ( i = 0, p_sg_element = p_scatter_gather_list->List;\r
+ i < scatter_gather_count; i++, p_memory_descriptor++, p_sg_element++ )\r
+ {\r
+ buf_addr = p_srp_conn_info->vaddr + p_sg_element->PhysicalAddress.QuadPart;\r
+ p_memory_descriptor->virtual_address = cl_hton64( buf_addr );\r
+ p_memory_descriptor->memory_handle = p_srp_conn_info->rkey;\r
+ p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length );\r
+\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("virtual_address[%d] = 0x%I64x.\n",\r
+ i, cl_ntoh64(p_memory_descriptor->virtual_address) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("memory_handle[%d] = 0x%x.\n",\r
+ i, cl_ntoh32( p_memory_descriptor->memory_handle) ) );\r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("data_length[%d] = %d.\n",\r
+ i, cl_ntoh32( p_memory_descriptor->data_length) ) );\r
+ }\r
}\r
}\r
SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
IN PVOID p_dev_ext,\r
IN OUT PSCSI_REQUEST_BLOCK p_srb )\r
{\r
- ib_api_status_t status;\r
+ ib_api_status_t status = IB_SUCCESS;\r
srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba;\r
srp_send_descriptor_t *p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension;\r
srp_session_t *p_srp_session;\r
-\r
+ srp_descriptors_t *p_descriptors;\r
+ \r
SRP_ENTER( SRP_DBG_DATA );\r
\r
cl_obj_lock( &p_hba->obj );\r
cl_obj_ref( &p_srp_session->obj );\r
cl_obj_unlock( &p_hba->obj );\r
\r
- status = srp_post_send_descriptor( &p_srp_session->descriptors,\r
- p_send_descriptor,\r
- p_srp_session );\r
+ p_descriptors = &p_srp_session->descriptors;\r
\r
- if ( status == IB_SUCCESS )\r
+ cl_spinlock_acquire ( &p_descriptors->pending_list_lock );\r
+ if ( (p_srp_session->connection.request_limit <= p_srp_session->connection.request_threashold) || \r
+ !cl_is_qlist_empty( &p_descriptors->pending_descriptors ) ||\r
+ p_srp_session->repost_is_on )\r
{\r
- cl_atomic_dec( &p_srp_session->connection.request_limit );\r
-\r
- if ( p_srp_session->connection.request_limit < 3 )\r
- {\r
- SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,\r
- ("Calling StorPortBusy.\n") );\r
- StorPortBusy( p_dev_ext, 1 );\r
- }\r
-\r
+ cl_spinlock_release ( &p_descriptors->pending_list_lock );\r
+ srp_add_pending_descriptor( p_descriptors, p_send_descriptor );\r
cl_obj_deref( &p_srp_session->obj );\r
goto exit;\r
}\r
+ cl_spinlock_release ( &p_descriptors->pending_list_lock );\r
\r
+ __srp_post_io_request( p_dev_ext, p_srb, p_srp_session );\r
cl_obj_deref( &p_srp_session->obj );\r
+ goto exit;\r
}\r
else\r
{\r
cl_obj_unlock( &p_hba->obj );\r
+ p_srb->SrbStatus = SRB_STATUS_NO_HBA;\r
+ goto err;\r
}\r
\r
- p_srb->SrbStatus = SRB_STATUS_NO_HBA;\r
+err:\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), Path = 0x%x, "\r
"Target = 0x%x, Lun = 0x%x, tag 0x%I64xn",\r
p_srb->TargetId,\r
p_srb->Lun,\r
get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );\r
+\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+ \r
StorPortNotification( RequestComplete, p_dev_ext, p_srb );\r
\r
exit:\r
IN OUT PSCSI_REQUEST_BLOCK p_srb )\r
{\r
\r
+ ib_api_status_t status = IB_SUCCESS;\r
srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba;\r
srp_session_t *p_srp_session;\r
\r
\r
StorPortPauseDevice( p_dev_ext, p_srb->PathId, p_srb->TargetId, p_srb->Lun, 10 );\r
\r
+ /* release this device's descriptors from the pending_list */\r
+ while ( (p_send_descriptor = srp_remove_lun_head_pending_descriptor( &p_srp_session->descriptors, p_srb->Lun )) != NULL )\r
+ {\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+ \r
+ SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
+ ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), "\r
+ "Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64xn",\r
+ g_srb_status_name[SRB_STATUS_BUS_RESET],\r
+ SRB_STATUS_BUS_RESET,\r
+ g_srb_function_name[p_send_descriptor->p_srb->Function],\r
+ p_send_descriptor->p_srb->Function,\r
+ p_send_descriptor->p_srb->PathId,\r
+ p_send_descriptor->p_srb->TargetId,\r
+ p_send_descriptor->p_srb->Lun,\r
+ get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );\r
+ }\r
+\r
+ /* release this device's descriptors from the sent_list */\r
while ( (p_send_descriptor = srp_remove_lun_head_send_descriptor( &p_srp_session->descriptors, p_srb->Lun )) != NULL )\r
{\r
+ status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );\r
+ if ( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("Failed to unmap FMR Status = %d.\n", status) );\r
+ // TODO: Kill session and inform port driver link down storportnotification\r
+ }\r
+ \r
SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), "\r
"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64xn",\r
get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );\r
}\r
\r
+\r
p_srb->SrbStatus = SRB_STATUS_SUCCESS;\r
\r
SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,\r
\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
+\r
+#if DBG \r
+\r
+/* statistics */\r
+\r
+void /* reset the per-session DBG statistics counters */\r
+srp_x_clean(\r
+ IN void *p_session )\r
+{\r
+ srp_session_t *p_srp_session = p_session;\r
+\r
+ if (p_srp_session == NULL)\r
+ return;\r
+\r
+ p_srp_session->x_pkt_fmr = 0;\r
+ p_srp_session->x_pkt_built = 0;\r
+ p_srp_session->x_rld_total = 0;\r
+ p_srp_session->x_rld_num = 0;\r
+ p_srp_session->x_rld_max = 0;\r
+ p_srp_session->x_rld_min = p_srp_session->x_req_limit; /* seed min tracker with x_req_limit so smaller deltas replace it - TODO confirm initial value */\r
+ p_srp_session->x_pend_total = 0;\r
+ p_srp_session->x_pend_num = 0;\r
+ p_srp_session->x_pend_max = 0;\r
+ p_srp_session->x_sent_total = 0;\r
+ p_srp_session->x_sent_num = 0;\r
+ p_srp_session->x_sent_max = 0;\r
+}\r
+\r
+void /* dump the per-session DBG statistics to the trace log */\r
+srp_x_print(\r
+ IN void *p_session )\r
+{\r
+ srp_session_t *p_srp_session = p_session;\r
+\r
+ if (p_srp_session == NULL)\r
+ return;\r
+\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,\r
+ ("req_limit %d, pkt_built %d, pkt_fmr'ed %d\n",\r
+ p_srp_session->x_req_limit, \r
+ p_srp_session->x_pkt_built,\r
+ p_srp_session->x_pkt_fmr ));\r
+\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,\r
+ ("request_limit_delta: max %d, min %d, average %d, num %d\n", \r
+ p_srp_session->x_rld_max, p_srp_session->x_rld_min, \r
+ (p_srp_session->x_rld_num) ? p_srp_session->x_rld_total / p_srp_session->x_rld_num : 0, /* guard against divide-by-zero */\r
+ p_srp_session->x_rld_num ));\r
+\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,\r
+ ("pendinq_desc: max %d, average %d, num %d\n", /* NOTE(review): "pendinq" is a typo in the trace text */\r
+ p_srp_session->x_pend_max, \r
+ (p_srp_session->x_pend_num) ? p_srp_session->x_pend_total / p_srp_session->x_pend_num : 0,\r
+ p_srp_session->x_pend_num ));\r
+\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,\r
+ ("sent_desc: max %d, average %d, num %d\n", \r
+ p_srp_session->x_sent_max, \r
+ (p_srp_session->x_sent_num) ? p_srp_session->x_sent_total / p_srp_session->x_sent_num : 0,\r
+ p_srp_session->x_sent_num ));\r
+\r
+}\r
+\r
+#endif\r
\r
extern uint32_t g_srp_dbg_level;\r
extern uint32_t g_srp_dbg_flags;\r
+extern uint32_t g_srp_mode_flags;\r
+\r
+// mode flags \r
+#define SRP_MODE_NO_FMR_POOL (1 << 0) /* don't use FMR_POOL - for tuning purposes */\r
+#define SRP_MODE_SG_UNLIMITED (1 << 1) /* don't obey the limitation, stated in DDK, not to enlarge StorPort max SG */\r
\r
#if defined(EVENT_TRACING)\r
//\r
cl_memclr( p_descriptors, sizeof(*p_descriptors) );\r
\r
cl_spinlock_init ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_init ( &p_descriptors->pending_list_lock );\r
cl_qlist_init( &p_descriptors->sent_descriptors );\r
+ cl_qlist_init( &p_descriptors->pending_descriptors );\r
\r
p_descriptors->initialized = TRUE;\r
\r
if ( p_descriptors->initialized == TRUE )\r
{\r
cl_spinlock_destroy ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_destroy ( &p_descriptors->pending_list_lock );\r
\r
if ( p_descriptors->p_recv_data_segments_array != NULL )\r
{\r
return IB_SUCCESS;\r
}\r
\r
+/* __srp_add_descriptor */\r
+/*!\r
+Puts descriptor at tail of the list; thread-safe (insertion done under p_lock)\r
+\r
+@param p_descriptor - pointer to the descriptor to add\r
+@param descriptors_list - pointer to the list to append to\r
+@param p_lock - spinlock protecting descriptors_list\r
+\r
+@return - none\r
+*/\r
+inline\r
+void\r
+__srp_add_descriptor(\r
+ IN srp_send_descriptor_t *p_descriptor,\r
+ IN cl_qlist_t *descriptors_list,\r
+ IN cl_spinlock_t *p_lock)\r
+{\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+\r
+ cl_spinlock_acquire ( p_lock );\r
+\r
+ cl_qlist_insert_tail( descriptors_list, &p_descriptor->list_item );\r
+ /* sanity: the insert is expected to have linked list_item to this list */\r
+ CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );\r
+\r
+ cl_spinlock_release ( p_lock );\r
+\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
+\r
/* srp_add_send_descriptor */\r
/*!\r
Puts send descriptor at tail of the sent list\r
IN srp_send_descriptor_t *p_descriptor )\r
{\r
SRP_ENTER( SRP_DBG_DATA );\r
+ __srp_add_descriptor( p_descriptor, \r
+ &p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
+\r
+/* srp_add_pending_descriptor */\r
+/*!\r
+Puts pending send descriptor at tail of the pending list\r
+(thread-safe: appends under pending_list_lock via __srp_add_descriptor)\r
+\r
+@param p_descriptors - pointer to the descriptors structure\r
+@param p_descriptor - pointer to the descriptor to add\r
+\r
+@return - none\r
+*/\r
+void\r
+srp_add_pending_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN srp_send_descriptor_t *p_descriptor )\r
+{\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+ __srp_add_descriptor( p_descriptor, \r
+ &p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
+\r
+/* __srp_remove_send_descriptor */\r
+/*!\r
+Removes a send descriptor from the given list\r
+\r
+@param p_descriptor - pointer to the descriptor to remove\r
+@param descriptors_list - pointer to the list to remove it from\r
+@param p_lock - spinlock protecting descriptors_list\r
+\r
+@return - none\r
+*/\r
+inline\r
+void\r
+__srp_remove_send_descriptor(\r
+ IN srp_send_descriptor_t *p_descriptor,\r
+ IN cl_qlist_t *descriptors_list,\r
+ IN cl_spinlock_t *p_lock)\r
+{\r
+ SRP_ENTER( SRP_DBG_DATA );\r
\r
- cl_spinlock_acquire ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_acquire ( p_lock );\r
\r
- cl_qlist_insert_tail( &p_descriptors->sent_descriptors, &p_descriptor->list_item );\r
- CL_ASSERT( &p_descriptors->sent_descriptors == p_descriptor->list_item.p_list );\r
+ CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );\r
+ cl_qlist_remove_item( descriptors_list, &p_descriptor->list_item );\r
\r
- cl_spinlock_release ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_release ( p_lock );\r
\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
\r
+\r
/* srp_remove_send_descriptor */\r
/*!\r
Removes send descriptor from the sent list\r
IN srp_send_descriptor_t *p_descriptor )\r
{\r
SRP_ENTER( SRP_DBG_DATA );\r
+ __srp_remove_send_descriptor( p_descriptor, \r
+ &p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+}\r
\r
- cl_spinlock_acquire ( &p_descriptors->sent_list_lock );\r
-\r
- CL_ASSERT( &p_descriptors->sent_descriptors == p_descriptor->list_item.p_list );\r
- cl_qlist_remove_item( &p_descriptors->sent_descriptors, &p_descriptor->list_item );\r
+/* srp_remove_pending_descriptor */\r
+/*!\r
+Removes pending send descriptor from the pending list\r
\r
- cl_spinlock_release ( &p_descriptors->sent_list_lock );\r
+@param p_descriptors - pointer to the descriptors structure\r
+@param p_descriptor - pointer to the descriptor to add\r
\r
+@return - none\r
+*/\r
+inline\r
+void\r
+srp_remove_pending_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN srp_send_descriptor_t *p_descriptor )\r
+{\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+ __srp_remove_send_descriptor( p_descriptor, \r
+ &p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );\r
SRP_EXIT( SRP_DBG_DATA );\r
}\r
\r
-/* srp_remove_lun_head_send_descriptor */\r
+/* __srp_remove_lun_head_send_descriptor */\r
/*!\r
-Removes and returns the send descriptor from the head of the sent list for the lun specified\r
+Removes and returns the send descriptor from the head of the a list for the lun specified\r
\r
@param p_descriptors - pointer to the descriptors structure\r
@param lun - lun for which to remove head send descriptor\r
+@param descriptors_list - pointer to the list\r
\r
@return - srp_send_descriptor at head of sent list or NULL if empty\r
*/\r
srp_send_descriptor_t*\r
-srp_remove_lun_head_send_descriptor(\r
- IN srp_descriptors_t *p_descriptors,\r
- IN UCHAR lun )\r
+__srp_remove_lun_head_send_descriptor(\r
+ IN UCHAR lun,\r
+ IN cl_qlist_t *descriptors_list,\r
+ IN cl_spinlock_t *p_lock)\r
{\r
srp_send_descriptor_t *p_descriptor;\r
\r
SRP_ENTER( SRP_DBG_DATA );\r
\r
- cl_spinlock_acquire ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_acquire ( p_lock );\r
\r
- p_descriptor = (srp_send_descriptor_t *)cl_qlist_head( &p_descriptors->sent_descriptors );\r
- CL_ASSERT( &p_descriptors->sent_descriptors == p_descriptor->list_item.p_list );\r
+ p_descriptor = (srp_send_descriptor_t *)cl_qlist_head( descriptors_list );\r
+ CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );\r
\r
- while ( p_descriptor != (srp_send_descriptor_t *)cl_qlist_end( &p_descriptors->sent_descriptors ) )\r
+ while ( p_descriptor != (srp_send_descriptor_t *)cl_qlist_end( descriptors_list ) )\r
{\r
if ( p_descriptor->p_srb->Lun == lun )\r
{\r
- CL_ASSERT( &p_descriptors->sent_descriptors == p_descriptor->list_item.p_list );\r
- cl_qlist_remove_item( &p_descriptors->sent_descriptors, &p_descriptor->list_item );\r
+ CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );\r
+ cl_qlist_remove_item( descriptors_list, &p_descriptor->list_item );\r
break;\r
}\r
\r
p_descriptor = (srp_send_descriptor_t *)cl_qlist_next( &p_descriptor->list_item );\r
- CL_ASSERT( &p_descriptors->sent_descriptors == p_descriptor->list_item.p_list );\r
+ CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );\r
}\r
\r
- if ( p_descriptor == (srp_send_descriptor_t *)cl_qlist_end( &p_descriptors->sent_descriptors ) )\r
+ if ( p_descriptor == (srp_send_descriptor_t *)cl_qlist_end( descriptors_list ) )\r
{\r
p_descriptor = NULL;\r
}\r
\r
- cl_spinlock_release ( &p_descriptors->sent_list_lock );\r
+ cl_spinlock_release ( p_lock );\r
+\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+\r
+ return ( p_descriptor );\r
+}\r
+\r
+\r
+/* srp_remove_lun_head_send_descriptor */\r
+/*!\r
+Removes and returns the send descriptor from the head of the sent list for the lun specified\r
+\r
+@param p_descriptors - pointer to the descriptors structure\r
+@param lun - lun for which to remove head send descriptor\r
\r
+@return - srp_send_descriptor at head of sent list or NULL if empty\r
+*/\r
+srp_send_descriptor_t*\r
+srp_remove_lun_head_send_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN UCHAR lun )\r
+{\r
+ srp_send_descriptor_t *p_descriptor;\r
+\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+ /* linear scan of the sent list, performed under sent_list_lock */\r
+ p_descriptor = __srp_remove_lun_head_send_descriptor( \r
+ lun, &p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );\r
+ SRP_EXIT( SRP_DBG_DATA );\r
+\r
+ return ( p_descriptor );\r
+}\r
+\r
+/* srp_remove_lun_head_pending_descriptor */\r
+/*!\r
+Removes and returns the send descriptor from the head of the pending list for the lun specified\r
+\r
+@param p_descriptors - pointer to the descriptors structure\r
+@param lun - lun for which to remove head send descriptor\r
+\r
+@return - srp_send_descriptor at head of sent list or NULL if empty\r
+*/\r
+srp_send_descriptor_t*\r
+srp_remove_lun_head_pending_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN UCHAR lun )\r
+{\r
+ srp_send_descriptor_t *p_descriptor;\r
+\r
+ SRP_ENTER( SRP_DBG_DATA );\r
+ p_descriptor = __srp_remove_lun_head_send_descriptor( \r
+ lun, &p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );\r
SRP_EXIT( SRP_DBG_DATA );\r
\r
return ( p_descriptor );\r
p_send_descriptor->ds[0].vaddr = p_srp_conn_info->vaddr + physical_address.QuadPart;\r
p_send_descriptor->ds[0].length = p_srp_conn_info->init_to_targ_iu_sz;\r
p_send_descriptor->ds[0].lkey = p_srp_conn_info->lkey;\r
+ p_send_descriptor->p_fmr_el = NULL;\r
\r
SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,\r
("hca vaddr = 0x%I64x.\n", p_srp_conn_info->vaddr));\r
typedef struct _srp_send_descriptor\r
{\r
/* Leave this as first member variable */\r
- cl_list_item_t list_item;\r
- ib_send_wr_t wr;\r
- uint64_t tag;\r
- SCSI_REQUEST_BLOCK *p_srb;\r
- ib_local_ds_t ds[SRP_NUM_SGE];\r
- uint8_t data_segment[SRP_MAX_IU_SIZE];\r
-} srp_send_descriptor_t;\r
+ cl_list_item_t list_item;\r
+ ib_send_wr_t wr;\r
+ uint64_t tag;\r
+ SCSI_REQUEST_BLOCK *p_srb;\r
+ mlnx_fmr_pool_el_t p_fmr_el;\r
+ ib_local_ds_t ds[SRP_NUM_SGE];\r
+ /* must be the last*/\r
+ uint8_t data_segment[SRP_MAX_IU_SIZE];\r
+}srp_send_descriptor_t;\r
\r
typedef struct _srp_recv_descriptor\r
{\r
- ib_recv_wr_t wr;\r
- ib_local_ds_t ds[SRP_NUM_SGE];\r
- uint8_t *p_data_segment;\r
-} srp_recv_descriptor_t;\r
+ ib_recv_wr_t wr;\r
+ ib_local_ds_t ds[SRP_NUM_SGE];\r
+ uint8_t *p_data_segment;\r
+}srp_recv_descriptor_t;\r
\r
typedef struct _srp_descriptors\r
{\r
\r
cl_spinlock_t sent_list_lock;\r
cl_qlist_t sent_descriptors;\r
+ cl_spinlock_t pending_list_lock;\r
+ cl_qlist_t pending_descriptors;\r
\r
uint32_t recv_descriptor_count;\r
srp_recv_descriptor_t *p_recv_descriptors_array;\r
IN srp_descriptors_t *p_descriptors,\r
IN UCHAR lun );\r
\r
+srp_send_descriptor_t*\r
+srp_remove_lun_head_pending_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN UCHAR lun );\r
+\r
+void\r
+srp_add_pending_descriptor(\r
+ IN srp_descriptors_t *p_descriptors,\r
+ IN srp_send_descriptor_t *p_descriptor );\r
+\r
ib_api_status_t\r
srp_post_send_descriptor(\r
IN srp_descriptors_t *p_descriptors,\r
\r
uint32_t g_srp_dbg_level = TRACE_LEVEL_ERROR;\r
uint32_t g_srp_dbg_flags = 0x0000ffff;\r
+uint32_t g_srp_mode_flags = 0;\r
\r
char g_srb_function_name[][32] =\r
{\r
__srp_free(\r
IN cl_obj_t *p_obj );\r
\r
+#if DBG\r
+\r
+void\r
+srp_x_print(\r
+ IN void *p_session );\r
+\r
+void\r
+srp_x_clean(\r
+ IN void *p_session );\r
+\r
+void *gp_session = NULL;\r
+\r
+#endif\r
\r
\r
static NTSTATUS\r
{\r
NTSTATUS status;\r
/* Remember the terminating entry in the table below. */\r
- RTL_QUERY_REGISTRY_TABLE table[3];\r
+ RTL_QUERY_REGISTRY_TABLE table[4];\r
UNICODE_STRING param_path;\r
\r
SRP_ENTER( SRP_DBG_PNP );\r
table[1].DefaultType = REG_DWORD;\r
table[1].DefaultData = &g_srp_dbg_flags;\r
table[1].DefaultLength = sizeof(ULONG);\r
+\r
+ table[2].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+ table[2].Name = L"ModeFlags";\r
+ table[2].EntryContext = &g_srp_mode_flags;\r
+ table[2].DefaultType = REG_DWORD;\r
+ table[2].DefaultData = &g_srp_mode_flags;\r
+ table[2].DefaultLength = sizeof(ULONG);\r
+\r
+\r
/* Have at it! */\r
status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
param_path.Buffer, table, NULL, NULL );\r
("KeReleaseMutex status = %d.\n", release_status) );\r
}\r
\r
+ #if DBG\r
+ /* statistics */\r
+\r
+ /* this function is called sometimes in the beginning of the test with \r
+ IRP_MN_QUERY_DEVICE_RELATIONS (7) request. Use this fact to print the statistics */\r
+ {\r
+ /* sometimes it's called once in 50msec, so we'll print only once every 'interval' calls */\r
+ static int interval = 40; /* 2 sec */\r
+ static int cnt = 0;\r
+ if (++cnt >= interval)\r
+ {\r
+ cnt = 0;\r
+ srp_x_print( gp_session );\r
+ srp_x_clean( gp_session );\r
+ }\r
+ }\r
+ \r
+ #endif\r
+\r
SRP_EXIT( SRP_DBG_PNP );\r
return status;\r
}\r
}\r
else\r
{\r
- p_config->NumberOfPhysicalBreaks = MIN( p_ext->p_hba->max_sg - 1, p_config->NumberOfPhysicalBreaks );\r
+ if (g_srp_mode_flags & SRP_MODE_SG_UNLIMITED)\r
+ // Exceeding this limit is prohibited by the DDK, but appears to work in practice\r
+ p_config->NumberOfPhysicalBreaks = p_ext->p_hba->max_sg - 1;\r
+ else\r
+ p_config->NumberOfPhysicalBreaks = MIN( p_ext->p_hba->max_sg - 1, p_config->NumberOfPhysicalBreaks );\r
}\r
\r
SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,\r
- ("NumberOfPhysicalBreaks set to = %d.\n", p_config->NumberOfPhysicalBreaks) );\r
+ ( "max_sg %d, New NumberOfPhysicalBreaks %d\n", \r
+ p_ext->p_hba->max_sg, p_config->NumberOfPhysicalBreaks));\r
\r
SRP_EXIT( SRP_DBG_PNP );\r
return SP_RETURN_FOUND;\r
/* Amount of physical memory to register. */\r
#define MEM_REG_SIZE 0xFFFFFFFFFFFFFFFF\r
\r
+\r
/* srp_open_ca */\r
/*!\r
Open the channel adapter associated with the SRP initiator\r
ib_api_status_t status;\r
ib_phys_create_t phys_create;\r
ib_phys_range_t phys_range;\r
-\r
+ mlnx_fmr_pool_create_t fmr_pool_create;\r
+ \r
SRP_ENTER( SRP_DBG_PNP );\r
\r
status = p_hca->p_hba->ifc.open_ca( p_hca->p_hba->h_al,\r
\r
p_hca->vaddr = 0;\r
\r
+ \r
status = p_hca->p_hba->ifc.reg_phys( p_hca->h_pd,\r
&phys_create,\r
&p_hca->vaddr,\r
&p_hca->lkey,\r
&p_hca->rkey,\r
&p_hca->h_mr );\r
+\r
if( status != IB_SUCCESS )\r
{\r
SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
("Physical Memory Registration Failure. Status = %d\n", status) );\r
-exit:\r
- srp_close_ca( p_hca );\r
+ goto exit;\r
}\r
\r
- SRP_EXIT( SRP_DBG_PNP );\r
+ fmr_pool_create.max_pages_per_fmr = SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER;\r
+ fmr_pool_create.page_size = 12;\r
+ fmr_pool_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;\r
+ fmr_pool_create.pool_size = 100;\r
+ fmr_pool_create.dirty_watermark = 2;\r
+ fmr_pool_create.flush_function = NULL;\r
+ fmr_pool_create.flush_arg = NULL;\r
+ fmr_pool_create.cache = TRUE;\r
+\r
+ status = p_hca->p_hba->ifc.create_mlnx_fmr_pool(p_hca->h_pd, &fmr_pool_create, &p_hca->h_fmr_pool);\r
\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,\r
+ ("FMR pool creation Failure. Status = %d\n", status) );\r
+ goto exit;\r
+ }\r
+\r
+ p_hca->fmr_page_size = 1<< fmr_pool_create.page_size;\r
+ p_hca->fmr_page_shift = (uint32_t)fmr_pool_create.page_size;\r
+ \r
+ SRP_EXIT( SRP_DBG_PNP );\r
+ return IB_SUCCESS;\r
+exit:\r
+ srp_close_ca( p_hca );\r
+ \r
+ SRP_EXIT( SRP_DBG_PNP );\r
return ( status );\r
}\r
\r
return ( IB_SUCCESS );\r
}\r
\r
+\r
ib_ca_handle_t h_ca;\r
ib_pd_handle_t h_pd;\r
ib_mr_handle_t h_mr;\r
- uint64_t vaddr;\r
+ mlnx_fmr_pool_handle_t h_fmr_pool;\r
+ uint32_t fmr_page_size;\r
+ uint32_t fmr_page_shift;\r
+ uint64_t vaddr;\r
net32_t lkey;\r
net32_t rkey;\r
\r
#include "srp_session.h"\r
#include <stdlib.h>\r
\r
+#if DBG\r
+extern void *gp_session;\r
+#endif\r
+\r
/* __srp_destroying_session */\r
/*!\r
Called when session has been marked for destruction\r
("After Insert Rel Session Object ref_cnt = %d\n",\r
p_srp_session->obj.ref_cnt) );\r
\r
+#if DBG\r
+ gp_session = p_srp_session;\r
+#endif\r
+\r
exit:\r
SRP_EXIT( SRP_DBG_SESSION );\r
\r
(uint8_t)p_srp_session->p_hba->ioc_info.profile.send_msg_depth,\r
p_srp_session );\r
\r
+ if ( status != IB_SUCCESS )\r
+ { // clean resources, taken upon login\r
+ srp_close_ca( &p_srp_session->hca );\r
+ srp_destroy_descriptors( &p_srp_session->descriptors );\r
+ }\r
+\r
exit:\r
SRP_EXIT( SRP_DBG_SESSION );\r
return ( status );\r
cl_obj_rel_t rel;\r
\r
srp_hba_t *p_hba;\r
-\r
+ atomic32_t repost_is_on;\r
srp_hca_t hca;\r
srp_connection_t connection;\r
srp_descriptors_t descriptors;\r
\r
SCSI_REQUEST_BLOCK *p_shutdown_srb;\r
\r
+#if DBG\r
+ /* statistics */\r
+\r
+ /* packets, built */\r
+ uint64_t x_pkt_fmr; /* number of packets, mapped by fmr_pool */\r
+ uint64_t x_pkt_built; /* number of packets, built */\r
+\r
+ /* request_limit_delta */\r
+ int64_t x_rld_total; /* sum of req_limit_delta values */\r
+ int32_t x_rld_num; /* number of req_limit_delta values */\r
+ int32_t x_rld_max; /* max req_limit_delta value */\r
+ int32_t x_rld_min; /* min req_limit_delta value */\r
+ int32_t x_rld_zeroes; /* number of zeroes */\r
+\r
+ int32_t x_rld_zeroes_cur; /* number of zeroes */\r
+ int32_t x_rld_zeroes_cur_min; /* number of zeroes */\r
+ int32_t x_rld_busy_success; \r
+ int32_t x_rld_busy_fail; \r
+\r
+ /* pending queue */\r
+ uint64_t x_pend_total; /* sum of pending_descriptors queue sizes */\r
+ uint32_t x_pend_num; /* number of pending_descriptors queue sizes */\r
+ uint32_t x_pend_max; /* max pending_descriptors queue size */\r
+\r
+ /* sent queue */\r
+ uint64_t x_sent_total; /* sum of sent_descriptors queue sizes */\r
+ uint32_t x_sent_num; /* number of sent_descriptors queue sizes */\r
+ uint32_t x_sent_max; /* max sent_descriptors queue size */\r
+\r
+ uint32_t x_req_limit; /* max number in-flight packets */\r
+#endif \r
} srp_session_t;\r
\r
srp_session_t*\r